@aics/vole-core 4.4.1 → 4.6.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/es/FusedChannelData.js +2 -2
- package/es/Line3d.js +80 -9
- package/es/SubrangeLineMaterial.js +71 -0
- package/es/constants/denoiseShader.js +1 -1
- package/es/constants/pathtraceOutputShader.js +1 -1
- package/es/constants/volumePTshader.js +1 -1
- package/es/constants/volumeRayMarchPickShader.js +1 -1
- package/es/constants/volumeRayMarchShader.js +1 -1
- package/es/constants/volumeSliceShader.js +1 -1
- package/es/loaders/JsonImageInfoLoader.js +3 -2
- package/es/loaders/OmeZarrLoader.js +12 -5
- package/es/loaders/TiffLoader.js +2 -1
- package/es/types/Line3d.d.ts +39 -3
- package/es/types/SubrangeLineMaterial.d.ts +33 -0
- package/es/types/loaders/TiffLoader.d.ts +1 -1
- package/es/types/utils/url_utils.d.ts +5 -0
- package/es/utils/url_utils.js +25 -0
- package/package.json +5 -1
package/es/FusedChannelData.js
CHANGED
@@ -3,9 +3,9 @@ import { renderToBufferVertShader } from "./constants/basicShaders.js";
/* babel-plugin-inline-import './constants/shaders/fuseUI.frag' */
const fuseShaderSrcUI = "precision highp float;\nprecision highp int;\nprecision highp usampler2D;\nprecision highp sampler3D;\n\n// the lut texture is a 256x1 rgba texture for each channel\nuniform sampler2D lutSampler;\n\nuniform vec2 lutMinMax;\nuniform uint highlightedId;\n\n// src texture is the raw volume intensity data\nuniform usampler2D srcTexture;\n\nvoid main() {\n ivec2 vUv = ivec2(int(gl_FragCoord.x), int(gl_FragCoord.y));\n uint intensity = texelFetch(srcTexture, vUv, 0).r;\n float ilookup = float(float(intensity) - lutMinMax.x) / float(lutMinMax.y - lutMinMax.x);\n // apply lut to intensity:\n vec4 pix = texture(lutSampler, vec2(ilookup, 0.5));\n gl_FragColor = vec4(pix.xyz * pix.w, pix.w);\n}\n";
/* babel-plugin-inline-import './constants/shaders/fuseF.frag' */
-
const fuseShaderSrcF = "precision highp float;\nprecision highp int;\nprecision highp sampler2D;\nprecision highp sampler3D;\n\n// the lut texture is a 256x1 rgba texture for each channel\nuniform sampler2D lutSampler;\n\nuniform vec2 lutMinMax;\n\n// src texture is the raw volume intensity data\nuniform sampler2D srcTexture;\n\nvoid main()
+
const fuseShaderSrcF = "precision highp float;\nprecision highp int;\nprecision highp sampler2D;\nprecision highp sampler3D;\n\n// the lut texture is a 256x1 rgba texture for each channel\nuniform sampler2D lutSampler;\n\nuniform vec2 lutMinMax;\n\n// src texture is the raw volume intensity data\nuniform sampler2D srcTexture;\n\nvoid main() {\n ivec2 vUv = ivec2(int(gl_FragCoord.x), int(gl_FragCoord.y));\n\n // load from channel\n float intensity = texelFetch(srcTexture, vUv, 0).r;\n\n float ilookup = float(float(intensity) - lutMinMax.x) / float(lutMinMax.y - lutMinMax.x);\n // apply lut to intensity:\n vec4 pix = texture(lutSampler, vec2(ilookup, 0.5));\n gl_FragColor = vec4(pix.xyz * pix.w, pix.w);\n}\n";
/* babel-plugin-inline-import './constants/shaders/fuseI.frag' */
-
const fuseShaderSrcI = "precision highp float;\nprecision highp int;\nprecision highp sampler2D;\nprecision highp sampler3D;\n\n// the lut texture is a 256x1 rgba texture for each channel\nuniform sampler2D lutSampler;\n\nuniform vec2 lutMinMax;\n\n// src texture is the raw volume intensity data\nuniform isampler2D srcTexture;\n\nvoid main()
+
const fuseShaderSrcI = "precision highp float;\nprecision highp int;\nprecision highp sampler2D;\nprecision highp sampler3D;\n\n// the lut texture is a 256x1 rgba texture for each channel\nuniform sampler2D lutSampler;\n\nuniform vec2 lutMinMax;\n\n// src texture is the raw volume intensity data\nuniform isampler2D srcTexture;\n\nvoid main() {\n ivec2 vUv = ivec2(int(gl_FragCoord.x), int(gl_FragCoord.y));\n int intensity = texelFetch(srcTexture, vUv, 0).r;\n float ilookup = float(float(intensity) - lutMinMax.x) / float(lutMinMax.y - lutMinMax.x);\n // apply lut to intensity:\n vec4 pix = texture(lutSampler, vec2(ilookup, 0.5));\n gl_FragColor = vec4(pix.xyz * pix.w, pix.w);\n}\n";
/* babel-plugin-inline-import './constants/shaders/colorizeUI.frag' */
const colorizeSrcUI = "precision highp float;\nprecision highp int;\nprecision highp usampler2D;\nprecision highp sampler3D;\n\nuniform sampler2D featureData;\n/** Min and max feature values that define the endpoints of the color map. Values\n * outside the range will be clamped to the nearest endpoint.\n */\nuniform float featureColorRampMin;\nuniform float featureColorRampMax;\nuniform sampler2D colorRamp;\nuniform usampler2D inRangeIds;\nuniform usampler2D outlierData;\n\n// TODO: Rename to `localId` for consistency\n/**\n * LUT mapping from the segmentation ID (raw pixel value) to the\n * global ID (index in data buffers like `featureData` and `outlierData`).\n * \n * For a given segmentation ID `segId`, the global ID is given by:\n * `segIdToGlobalId[segId - segIdOffset] - 1`.\n*/\nuniform usampler2D segIdToGlobalId;\nuniform uint segIdOffset;\n\nuniform vec3 outlineColor;\n\n/** MUST be synchronized with the DrawMode enum in ColorizeCanvas! */\nconst uint DRAW_MODE_HIDE = 0u;\nconst uint DRAW_MODE_COLOR = 1u;\nconst uint BACKGROUND_ID = 0u;\nconst uint MISSING_DATA_ID = 0xFFFFFFFFu;\n\nuniform vec3 outlierColor;\nuniform uint outlierDrawMode;\nuniform vec3 outOfRangeColor;\nuniform uint outOfRangeDrawMode;\n\nuniform uint highlightedId;\n\nuniform bool useRepeatingCategoricalColors;\n\n// src texture is the raw volume intensity data\nuniform usampler2D srcTexture;\n\nvec4 getFloatFromTex(sampler2D tex, int index) {\n int width = textureSize(tex, 0).x;\n ivec2 featurePos = ivec2(index % width, index / width);\n return texelFetch(tex, featurePos, 0);\n}\nuvec4 getUintFromTex(usampler2D tex, int index) {\n int width = textureSize(tex, 0).x;\n ivec2 featurePos = ivec2(index % width, index / width);\n return texelFetch(tex, featurePos, 0);\n}\nuint getId(ivec2 uv) {\n uint rawId = texelFetch(srcTexture, uv, 0).r;\n if (rawId == 0u) {\n return BACKGROUND_ID;\n }\n uvec4 c = getUintFromTex(segIdToGlobalId, int(rawId - segIdOffset));\n // Note: IDs are offset by `1` to reserve `0` for segmentations that don't\n // have associated data. 
`1` MUST be subtracted from the ID when accessing\n // data buffers.\n uint globalId = c.r;\n if (globalId == 0u) {\n return MISSING_DATA_ID;\n }\n return globalId;\n}\n\nvec4 getColorRamp(float val) {\n float width = float(textureSize(colorRamp, 0).x);\n float range = (width - 1.0) / width;\n float adjustedVal = (0.5 / width) + (val * range);\n return texture(colorRamp, vec2(adjustedVal, 0.5));\n}\n\nvec4 getCategoricalColor(float featureValue) {\n float width = float(textureSize(colorRamp, 0).x);\n float modValue = mod(featureValue, width);\n // The categorical texture uses no interpolation, so when sampling, `modValue`\n // is rounded to the nearest integer.\n return getColorRamp(modValue / (width - 1.0));\n}\n\nvec4 getColorFromDrawMode(uint drawMode, vec3 defaultColor) {\n const uint DRAW_MODE_HIDE = 0u;\n vec3 backgroundColor = vec3(0.0, 0.0, 0.0);\n if (drawMode == DRAW_MODE_HIDE) {\n return vec4(backgroundColor, 0.0);\n } else {\n return vec4(defaultColor, 1.0);\n }\n}\n\nfloat getFeatureVal(uint id) {\n // Data buffer starts at 0, non-background segmentation IDs start at 1\n return getFloatFromTex(featureData, int(id) - 1).r;\n}\nuint getOutlierVal(uint id) {\n // Data buffer starts at 0, non-background segmentation IDs start at 1\n return getUintFromTex(outlierData, int(id) - 1).r;\n}\nbool getIsInRange(uint id) {\n return getUintFromTex(inRangeIds, int(id) - 1).r == 1u;\n}\nbool getIsOutlier(float featureVal, uint outlierVal) {\n return isinf(featureVal) || outlierVal != 0u;\n}\n\nvec4 getObjectColor(ivec2 sUv, float opacity) {\n // Get the segmentation id at this pixel\n uint id = getId(sUv);\n\n // A segmentation id of 0 represents background\n if (id == BACKGROUND_ID) {\n return vec4(0, 0, 0, 0);\n }\n\n // color the highlighted object. Note, `highlightedId` is a 0-based index\n // (global ID w/o offset), while `id` is a 1-based index.\n // if (id - 1u == highlightedId) {\n // return vec4(outlineColor, 1.0);\n // }\n\n float featureVal = getFeatureVal(id);\n uint outlierVal = getOutlierVal(id);\n float normFeatureVal = (featureVal - featureColorRampMin) / (featureColorRampMax - featureColorRampMin);\n\n // Use the selected draw mode to handle out of range and outlier values;\n // otherwise color with the color ramp as usual.\n bool isInRange = getIsInRange(id);\n bool isOutlier = getIsOutlier(featureVal, outlierVal);\n bool isMissingData = (id == MISSING_DATA_ID);\n\n // Features outside the filtered/thresholded range will all be treated the same (use `outOfRangeDrawColor`).\n // Features inside the range can either be outliers or standard values, and are colored accordingly.\n vec4 color;\n if (isMissingData) { \n // TODO: Add color controls for missing data\n color = getColorFromDrawMode(outlierDrawMode, outlierColor);\n } else if (isInRange) {\n if (isOutlier) {\n color = getColorFromDrawMode(outlierDrawMode, outlierColor);\n } else if (useRepeatingCategoricalColors) {\n color = getCategoricalColor(featureVal);\n } else {\n color = getColorRamp(normFeatureVal);\n }\n } else {\n color = getColorFromDrawMode(outOfRangeDrawMode, outOfRangeColor);\n }\n color.a *= opacity;\n return color;\n}\n\nvoid main() {\n ivec2 vUv = ivec2(int(gl_FragCoord.x), int(gl_FragCoord.y));\n gl_FragColor = getObjectColor(vUv, 1.0);\n}\n";
// This is the owner of the fused RGBA volume texture atlas, and the mask texture atlas.
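All three fuse shader variants share the same LUT application: the raw intensity is normalized against `lutMinMax`, the normalized value samples the 256x1 LUT, and the result is emitted with premultiplied alpha. A minimal sketch of that normalization in plain JavaScript, with made-up values chosen only for illustration:

// Hypothetical values: lutMinMax = (100, 300), raw intensity = 200.
const lutMinMax = { x: 100, y: 300 };
const intensity = 200;
const ilookup = (intensity - lutMinMax.x) / (lutMinMax.y - lutMinMax.x); // = 0.5
// The shader then samples texture(lutSampler, vec2(ilookup, 0.5)) and emits
// vec4(pix.xyz * pix.w, pix.w), i.e. premultiplied alpha.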

package/es/Line3d.js
CHANGED
@@ -1,8 +1,9 @@
-import {
+import { Color, DataTexture, LinearFilter } from "three";
 import { LineSegments2 } from "three/addons/lines/LineSegments2.js";
 import { LineSegmentsGeometry } from "three/addons/lines/LineSegmentsGeometry.js";
 import { MESH_NO_PICK_OCCLUSION_LAYER, OVERLAY_LAYER } from "./ThreeJsPanel.js";
 import BaseDrawableMeshObject from "./BaseDrawableMeshObject.js";
+import SubrangeLineMaterial from "./SubrangeLineMaterial.js";
 const DEFAULT_VERTEX_BUFFER_SIZE = 1020;

 /**
@@ -13,14 +14,17 @@ export default class Line3d extends BaseDrawableMeshObject {
   constructor() {
     super();
     this.bufferSize = DEFAULT_VERTEX_BUFFER_SIZE;
+    this.useVertexColors = false;
+    this.useColorRamp = false;
+    this.colorRampTexture = null;
     const geometry = new LineSegmentsGeometry();
     geometry.setPositions(new Float32Array(this.bufferSize));
-
-    color: "#
+    this.lineMaterial = new SubrangeLineMaterial({
+      color: "#fff",
       linewidth: 2,
       worldUnits: false
     });
-    this.lineMesh = new LineSegments2(geometry,
+    this.lineMesh = new LineSegments2(geometry, this.lineMaterial);

     // Lines need to write depth information so they interact with the volume
     // (so the lines appear to fade into the volume if they intersect), but
@@ -36,16 +40,65 @@ export default class Line3d extends BaseDrawableMeshObject {

   // Line-specific functions

+  updateVertexColorFlag() {
+    this.lineMesh.material.vertexColors = this.useVertexColors || this.useColorRamp;
+    this.lineMesh.material.needsUpdate = true;
+  }
+
   /**
    * Sets the color of the line material.
    * @param color Base line color.
-   * @param useVertexColors If true,
-   * the per-vertex colors defined in the geometry (see `setLineVertexData`).
+   * @param useVertexColors If true, the line will multiply the base color with
+   * the per-vertex colors defined in the geometry (see `setLineVertexData`).
+   * Default is `false`.
    */
   setColor(color, useVertexColors = false) {
     this.lineMesh.material.color.set(color);
-    this.
-    this.
+    this.useVertexColors = useVertexColors;
+    this.updateVertexColorFlag();
+  }
+
+  /**
+   * Returns a new DataTexture representing the color stops in LinearSRGB color
+   * space.
+   */
+  static colorStopsToTexture(colorStops) {
+    const colors = colorStops.map(c => new Color(c));
+    const linearSRGBDataArr = colors.flatMap(col => {
+      return [col.r, col.g, col.b, 1];
+    });
+    const texture = new DataTexture(new Float32Array(linearSRGBDataArr), colors.length, 1);
+    texture.minFilter = texture.magFilter = LinearFilter;
+    texture.internalFormat = "RGBA32F";
+    texture.needsUpdate = true;
+    return texture;
+  }
+
+  /**
+   * Sets the color ramp used for coloring the line. Note that the color will be
+   * multiplied by the base color defined in `setColor()`.
+   * @param colorStops Array of hex color stop strings.
+   * @param useColorRamp If true, the line will use the color ramp for coloring.
+   * Default is `false`.
+   */
+  setColorRamp(colorStops, useColorRamp = false) {
+    if (this.colorRampTexture) {
+      this.colorRampTexture.dispose();
+    }
+    this.colorRampTexture = Line3d.colorStopsToTexture(colorStops);
+    this.lineMaterial.colorRamp = this.colorRampTexture;
+    this.lineMaterial.useColorRamp = useColorRamp;
+    this.useColorRamp = useColorRamp;
+    this.updateVertexColorFlag();
+  }
+
+  /**
+   * Sets the scaling parameters for how the color ramp is applied. The color
+   * ramp will be centered at `vertexOffset` and span `vertexScale` vertices.
+   */
+  setColorRampScale(vertexScale, vertexOffset) {
+    this.lineMaterial.colorRampVertexScale = vertexScale;
+    this.lineMaterial.colorRampVertexOffset = vertexOffset;
   }

   /**
@@ -120,11 +173,29 @@ export default class Line3d extends BaseDrawableMeshObject {
     }
   }

-  /**
+  /**
+   * Number of line segments that should be visible.
+   * @deprecated Use `setVisibleSegmentsRange` instead.
+   */
   setNumSegmentsVisible(segments) {
     if (this.lineMesh.geometry) {
       const count = segments;
       this.lineMesh.geometry.instanceCount = Math.max(0, count);
     }
   }
+
+  /**
+   * Sets the range of line segments that are visible; line segments outside of
+   * the range will be hidden.
+   * @param startSegment Index of the segment at the start of the visible range
+   * (inclusive).
+   * @param endSegment Index of the segment at the end of the visible range
+   * (exclusive).
+   */
+  setVisibleSegmentsRange(startSegment, endSegment) {
+    this.lineMaterial.minInstance = startSegment;
+    if (this.lineMesh.geometry) {
+      this.lineMesh.geometry.instanceCount = Math.max(0, endSegment);
+    }
+  }
 }
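Taken together, the new Line3d methods form a small API for ramp-colored, partially revealed lines. A minimal usage sketch (the stop colors and segment indices are illustrative values, not taken from the package):

const line = new Line3d();
// The base color multiplies the ramp; white leaves ramp colors unchanged.
line.setColor("#ffffff");
// Build a LinearSRGB ramp texture from hex stops and enable ramp coloring.
line.setColorRamp(["#0000ff", "#ff00ff", "#ff0000"], true);
// Ramp spans 100 vertices, centered on vertex 50.
line.setColorRampScale(100, 50);
// Show only segments [10, 40): the lower bound is enforced by the material's
// minInstance uniform, the upper bound by the geometry's instanceCount.
line.setVisibleSegmentsRange(10, 40);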

package/es/SubrangeLineMaterial.js
ADDED
@@ -0,0 +1,71 @@
import { DataTexture, FloatType, LinearFilter, RGBAFormat, UniformsUtils } from "three";
import { LineMaterial } from "three/addons/lines/LineMaterial";
/* babel-plugin-inline-import './constants/shaders/track.vert' */
const vertexShader = "// Adapted from three.js LineMaterial and modified to add minInstance uniform.\n// See https://github.com/mrdoob/three.js/blob/master/examples/jsm/lines/LineMaterial.js\n\n#include <common>\n#include <color_pars_vertex>\n#include <fog_pars_vertex>\n#include <logdepthbuf_pars_vertex>\n#include <clipping_planes_pars_vertex>\n\nuniform float linewidth;\nuniform vec2 resolution;\n\n// CHANGED FROM ORIGINAL \n// -----------------------------\nuniform int minInstance;\nuniform bool useColorRamp;\nuniform sampler2D colorRamp;\n/** The number of vertices that the color ramp spans. */\nuniform float colorRampVertexScale;\n/** The vertex index that will be assigned the middle of the color ramp. */\nuniform float colorRampVertexOffset;\n// -----------------------------\n\nattribute vec3 instanceStart;\nattribute vec3 instanceEnd;\n\nattribute vec3 instanceColorStart;\nattribute vec3 instanceColorEnd;\n\n#ifdef WORLD_UNITS\nvarying vec4 worldPos;\nvarying vec3 worldStart;\nvarying vec3 worldEnd;\n#ifdef USE_DASH\nvarying vec2 vUv;\n#endif\n\n#else\nvarying vec2 vUv;\n#endif\n\n#ifdef USE_DASH\nuniform float dashScale;\nattribute float instanceDistanceStart;\nattribute float instanceDistanceEnd;\nvarying float vLineDistance;\n#endif\n\nvoid trimSegment(const in vec4 start, inout vec4 end) {\n // trim end segment so it terminates between the camera plane and the near\n // plane\n // conservative estimate of the near plane\n float a = projectionMatrix[2][2]; // 3nd entry in 3th column\n float b = projectionMatrix[3][2]; // 3nd entry in 4th column\n float nearEstimate = -0.5 * b / a;\n float alpha = (nearEstimate - start.z) / (end.z - start.z);\n end.xyz = mix(start.xyz, end.xyz, alpha);\n}\n\nvoid main() {\n // CHANGED FROM ORIGINAL\n // -----------------------------\n #ifdef USE_COLOR\n if (useColorRamp) {\n // Determine the vertex index in the original line, using the current\n // instance ID and vertex ID. THREE's line segments have 8 vertices, where\n // the first 4 are at the end of the line segment and the last 4 are at the\n // start (determined experimentally).\n int lineVertexIdx = gl_InstanceID + 1;\n if (gl_VertexID >= 4) {\n lineVertexIdx -= 1;\n }\n // Map the vertex index to the range [0, 1] for color ramp lookup.\n float t = (float(lineVertexIdx) - colorRampVertexOffset) / colorRampVertexScale + 0.5;\n t = clamp(t, 0.0, 1.0);\n vColor.xyz = texture(colorRamp, vec2(t, 0.5)).xyz;\n } else {\n // Original default color behavior\n vColor.xyz = (position.y < 0.5) ? instanceColorStart : instanceColorEnd;\n }\n #endif\n\n // Cull instances below min instance\n if (gl_InstanceID < minInstance) {\n gl_Position = vec4(0.0, 0.0, 0.0, 0.0);\n return;\n }\n // -----------------------------\n\n #ifdef USE_DASH\n vLineDistance = (position.y < 0.5) ? dashScale * instanceDistanceStart : dashScale * instanceDistanceEnd;\n vUv = uv;\n #endif\n\n float aspect = resolution.x / resolution.y;\n\n // camera space\n vec4 start = modelViewMatrix * vec4(instanceStart, 1.0);\n vec4 end = modelViewMatrix * vec4(instanceEnd, 1.0);\n\n #ifdef WORLD_UNITS\n worldStart = start.xyz;\n worldEnd = end.xyz;\n #else\n vUv = uv;\n #endif\n\n // special case for perspective projection, and segments that terminate either\n // in, or behind, the camera plane. clearly the gpu firmware has a way of\n // addressing this issue when projecting into ndc space, but we need to\n // perform ndc-space calculations in the shader, so we must address this issue\n // directly. 
perhaps there is a more elegant solution -- WestLangley\n\n bool perspective = (projectionMatrix[2][3] == -1.0); // 4th entry in the 3rd column\n\n if (perspective) {\n if (start.z < 0.0 && end.z >= 0.0) {\n trimSegment(start, end);\n } else if (end.z < 0.0 && start.z >= 0.0) {\n trimSegment(end, start);\n }\n }\n\n // clip space\n vec4 clipStart = projectionMatrix * start;\n vec4 clipEnd = projectionMatrix * end;\n\n // ndc space\n vec3 ndcStart = clipStart.xyz / clipStart.w;\n vec3 ndcEnd = clipEnd.xyz / clipEnd.w;\n\n // direction\n vec2 dir = ndcEnd.xy - ndcStart.xy;\n\n // account for clip-space aspect ratio\n dir.x *= aspect;\n dir = normalize(dir);\n\n #ifdef WORLD_UNITS\n vec3 worldDir = normalize(end.xyz - start.xyz);\n vec3 tmpFwd = normalize(mix(start.xyz, end.xyz, 0.5));\n vec3 worldUp = normalize(cross(worldDir, tmpFwd));\n vec3 worldFwd = cross(worldDir, worldUp);\n worldPos = position.y < 0.5 ? start : end;\n\n // height offset\n float hw = linewidth * 0.5;\n worldPos.xyz += position.x < 0.0 ? hw * worldUp : -hw * worldUp;\n\n // don't extend the line if we're rendering dashes because we\n // won't be rendering the endcaps\n #ifndef USE_DASH\n // cap extension\n worldPos.xyz += position.y < 0.5 ? -hw * worldDir : hw * worldDir;\n // add width to the box\n worldPos.xyz += worldFwd * hw;\n // endcaps\n if (position.y > 1.0 || position.y < 0.0) {\n worldPos.xyz -= worldFwd * 2.0 * hw;\n }\n #endif\n\n // project the worldpos\n vec4 clip = projectionMatrix * worldPos;\n\n // shift the depth of the projected points so the line\n // segments overlap neatly\n vec3 clipPose = (position.y < 0.5) ? ndcStart : ndcEnd;\n clip.z = clipPose.z * clip.w;\n\n #else\n vec2 offset = vec2(dir.y, -dir.x);\n // undo aspect ratio adjustment\n dir.x /= aspect;\n offset.x /= aspect;\n\n // sign flip\n if (position.x < 0.0)\n offset *= -1.0;\n // endcaps\n if (position.y < 0.0) {\n offset += -dir;\n } else if (position.y > 1.0) {\n offset += dir;\n }\n // adjust for linewidth\n offset *= linewidth;\n // adjust for clip-space to screen-space conversion \n // maybe resolution should be based on viewport ...\n offset /= resolution.y;\n // select end\n vec4 clip = (position.y < 0.5) ? clipStart : clipEnd;\n // back to clip space\n offset *= clip.w;\n clip.xy += offset;\n #endif\n\n gl_Position = clip;\n\n vec4 mvPosition = (position.y < 0.5) ? start : end; // this is an approximation\n\n #include <logdepthbuf_vertex>\n #include <clipping_planes_vertex>\n #include <fog_vertex>\n}\n";
const PLACEHOLDER_COLOR_RAMP = new DataTexture(new Float32Array([0, 0, 0, 1, 1, 1]), 2, 1, RGBAFormat, FloatType);
PLACEHOLDER_COLOR_RAMP.minFilter = LinearFilter;
PLACEHOLDER_COLOR_RAMP.magFilter = LinearFilter;
PLACEHOLDER_COLOR_RAMP.internalFormat = "RGBA32F";
PLACEHOLDER_COLOR_RAMP.needsUpdate = true;

/**
 * Replacement for LineMaterial with custom vertex shader to support showing
 * only a subrange of line segments. Use with `instanceCount` on the geometry
 * and the `minInstance` uniform to control the visible range.
 */
export default class SubrangeLineMaterial extends LineMaterial {
  constructor(params) {
    super(params);
    this.vertexShader = vertexShader;
    this.uniforms = UniformsUtils.merge([this.uniforms, {
      minInstance: {
        value: params?.minInstance ?? 0
      },
      useColorRamp: {
        value: false
      },
      colorRamp: {
        value: PLACEHOLDER_COLOR_RAMP
      },
      colorRampVertexScale: {
        value: 1
      },
      colorRampVertexOffset: {
        value: 0
      }
    }]);
    this.uniformsNeedUpdate = true;
  }

  /**
   * The minimum instance index to render, inclusive. Instances below this index
   * will not be visible. Use with `instanceCount` on the geometry to show a
   * subrange of line segments.
   */
  set minInstance(value) {
    this.uniforms.minInstance.value = value;
  }
  set useColorRamp(value) {
    this.uniforms.useColorRamp.value = value;
  }
  set colorRamp(value) {
    this.uniforms.colorRamp.value = value;
  }

  /** The number of vertices that the color ramp spans. */
  set colorRampVertexScale(value) {
    this.uniforms.colorRampVertexScale.value = value;
  }

  /**
   * The vertex index that will be assigned the middle of the color ramp. Vertex
   * indices start at 0 for the first vertex in the line segments geometry.
   *
   * For example, if the color ramp spans 10 vertices, setting
   * `colorRampVertexOffset` to 5 will center the color ramp on the 5th vertex,
   * with the starting color at vertex 0 and the ending color at vertex 10.
   */
  set colorRampVertexOffset(value) {
    this.uniforms.colorRampVertexOffset.value = value;
  }
}
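The subrange mechanism is worth spelling out: for any gl_InstanceID below minInstance, the custom vertex shader emits a degenerate gl_Position = vec4(0.0, 0.0, 0.0, 0.0), so those segments never rasterize; the upper bound is the geometry's standard instanceCount. A sketch of driving the material directly, outside Line3d (the deep-import path and positions array are assumptions for illustration):

import { LineSegments2 } from "three/addons/lines/LineSegments2.js";
import { LineSegmentsGeometry } from "three/addons/lines/LineSegmentsGeometry.js";
import SubrangeLineMaterial from "@aics/vole-core/es/SubrangeLineMaterial.js"; // assumed path

const geometry = new LineSegmentsGeometry();
geometry.setPositions(new Float32Array(60)); // placeholder data: 10 segments, 6 floats each

const material = new SubrangeLineMaterial({ color: "#fff", linewidth: 2, worldUnits: false });
material.minInstance = 2;    // segments 0-1 collapse to w = 0 in the vertex shader
geometry.instanceCount = 8;  // segments 8-9 are never instanced

const mesh = new LineSegments2(geometry, material); // draws segments 2..7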

package/es/constants/denoiseShader.js
CHANGED
@@ -1,6 +1,6 @@
import { Vector2 } from "three";
/* babel-plugin-inline-import './shaders/pathtrace_denoise.frag' */
-
const denoiseFragmentShader = "precision highp float;\nprecision highp int;\nprecision highp sampler2D;\n\nuniform float gInvExposure;\nuniform int gDenoiseWindowRadius;\nuniform float gDenoiseNoise;\nuniform float gDenoiseInvWindowArea;\nuniform float gDenoiseWeightThreshold;\nuniform float gDenoiseLerpThreshold;\nuniform float gDenoiseLerpC;\nuniform vec2 gDenoisePixelSize;\n\nuniform sampler2D tTexture0;\nin vec2 vUv;\n\n// Used to convert from XYZ to linear RGB space\nconst mat3 XYZ_2_RGB = (mat3(\n 3.2404542, -1.5371385, -0.4985314,\n -0.9692660, 1.8760108, 0.0415560,\n 0.0556434, -0.2040259, 1.0572252\n));\n\nvec3 XYZtoRGB(vec3 xyz) {\n return xyz * XYZ_2_RGB;\n}\n\nvoid main()
+
const denoiseFragmentShader = "precision highp float;\nprecision highp int;\nprecision highp sampler2D;\n\nuniform float gInvExposure;\nuniform int gDenoiseWindowRadius;\nuniform float gDenoiseNoise;\nuniform float gDenoiseInvWindowArea;\nuniform float gDenoiseWeightThreshold;\nuniform float gDenoiseLerpThreshold;\nuniform float gDenoiseLerpC;\nuniform vec2 gDenoisePixelSize;\n\nuniform sampler2D tTexture0;\nin vec2 vUv;\n\n// Used to convert from XYZ to linear RGB space\nconst mat3 XYZ_2_RGB = (mat3(\n 3.2404542, -1.5371385, -0.4985314,\n -0.9692660, 1.8760108, 0.0415560,\n 0.0556434, -0.2040259, 1.0572252\n));\n\nvec3 XYZtoRGB(vec3 xyz) {\n return xyz * XYZ_2_RGB;\n}\n\nvoid main() {\n vec4 pixelColor = texture(tTexture0, vUv);\n // TODO TONE MAP!!!!!!\n pixelColor.rgb = XYZtoRGB(pixelColor.rgb);\n\n pixelColor.rgb = 1.0 - exp(-pixelColor.rgb * gInvExposure);\n pixelColor = clamp(pixelColor, 0.0, 1.0);\n\n /////////////////////\n /////////////////////\n /////////////////////\n /////////////////////\n //// DENOISING FILTER\n /////////////////////\n // see https://developer.download.nvidia.com/compute/cuda/1.1-Beta/x86_website/projects/imageDenoising/doc/imageDenoising.pdf\n /////////////////////\n vec4 clr00 = pixelColor;\n\n float fCount = 0.0;\n float SumWeights = 0.0;\n vec3 clr = vec3(0.0, 0.0, 0.0);\n\n vec2 uvsample = vUv;\n vec3 rgbsample;\n for (int i = -gDenoiseWindowRadius; i <= gDenoiseWindowRadius; i++) {\n for (int j = -gDenoiseWindowRadius; j <= gDenoiseWindowRadius; j++) {\n\n // boundary checking?\n vec3 clrIJ = texture(tTexture0, vUv + vec2(float(i) / gDenoisePixelSize.x, float(j) / gDenoisePixelSize.y)).rgb;\n //vec3 clrIJ = texelFetch(tTexture0, ivec2(gl_FragCoord.xy) + ivec2(i,j), 0).rgb;\n\n rgbsample = XYZtoRGB(clrIJ);\n // tone map!\n rgbsample = 1.0 - exp(-rgbsample * gInvExposure);\n rgbsample = clamp(rgbsample, 0.0, 1.0);\n\n clrIJ = rgbsample;\n\n float distanceIJ = (clr00.x - clrIJ.x) * (clr00.x - clrIJ.x) + (clr00.y - clrIJ.y) * (clr00.y - clrIJ.y) + (clr00.z - clrIJ.z) * (clr00.z - clrIJ.z);\n\n // gDenoiseNoise = 1/h^2\n //\n float weightIJ = exp(-(distanceIJ * gDenoiseNoise + float(i * i + j * j) * gDenoiseInvWindowArea));\n\n clr += (clrIJ * weightIJ);\n\n SumWeights += weightIJ;\n\n fCount += (weightIJ > gDenoiseWeightThreshold) ? gDenoiseInvWindowArea : 0.0;\n }\n }\n\n SumWeights = 1.0 / SumWeights;\n\n clr.rgb *= SumWeights;\n\n float LerpQ = (fCount > gDenoiseLerpThreshold) ? gDenoiseLerpC : 1.0 - gDenoiseLerpC;\n\n clr.rgb = mix(clr.rgb, clr00.rgb, LerpQ);\n clr.rgb = clamp(clr.rgb, 0.0, 1.0);\n\n pc_fragColor = vec4(clr.rgb, clr00.a);\n}\n";
export const denoiseFragmentShaderSrc = denoiseFragmentShader;
const DENOISE_WINDOW_RADIUS = 3;
export const denoiseShaderUniforms = () => ({

package/es/constants/pathtraceOutputShader.js
CHANGED
@@ -1,5 +1,5 @@
/* babel-plugin-inline-import './shaders/pathtrace_output.frag' */
-
const pathtraceOutputFragmentShader = "precision highp float;\nprecision highp int;\nprecision highp sampler2D;\n\nuniform float gInvExposure;\nuniform sampler2D tTexture0;\nin vec2 vUv;\n\n// Used to convert from XYZ to linear RGB space\nconst mat3 XYZ_2_RGB = (mat3(\n 3.2404542, -1.5371385, -0.4985314,\n -0.9692660, 1.8760108, 0.0415560,\n 0.0556434, -0.2040259, 1.0572252\n));\n\nvec3 XYZtoRGB(vec3 xyz) {\n return xyz * XYZ_2_RGB;\n}\n\nvoid main() {\n vec4 pixelColor = texture(tTexture0, vUv);\n\n pixelColor.rgb = XYZtoRGB(pixelColor.rgb);\n\n // pixelColor.rgb = pow(pixelColor.rgb, vec3(1.0/2.2));\n pixelColor.rgb = 1.0-exp(-pixelColor.rgb*gInvExposure);\n pixelColor = clamp(pixelColor, 0.0, 1.0);\n\n pc_fragColor = pixelColor; // sqrt(pixelColor);\n // out_FragColor = pow(pixelColor, vec4(1.0/2.2));\n}\n";
+
const pathtraceOutputFragmentShader = "precision highp float;\nprecision highp int;\nprecision highp sampler2D;\n\nuniform float gInvExposure;\nuniform sampler2D tTexture0;\nin vec2 vUv;\n\n// Used to convert from XYZ to linear RGB space\nconst mat3 XYZ_2_RGB = (mat3(\n 3.2404542, -1.5371385, -0.4985314,\n -0.9692660, 1.8760108, 0.0415560,\n 0.0556434, -0.2040259, 1.0572252\n));\n\nvec3 XYZtoRGB(vec3 xyz) {\n return xyz * XYZ_2_RGB;\n}\n\nvoid main() {\n vec4 pixelColor = texture(tTexture0, vUv);\n\n pixelColor.rgb = XYZtoRGB(pixelColor.rgb);\n\n // pixelColor.rgb = pow(pixelColor.rgb, vec3(1.0/2.2));\n pixelColor.rgb = 1.0 - exp(-pixelColor.rgb * gInvExposure);\n pixelColor = clamp(pixelColor, 0.0, 1.0);\n\n pc_fragColor = pixelColor; // sqrt(pixelColor);\n // out_FragColor = pow(pixelColor, vec4(1.0/2.2));\n}\n";
export const pathtraceOutputFragmentShaderSrc = pathtraceOutputFragmentShader;
export const pathtraceOutputShaderUniforms = () => ({
gInvExposure: {

package/es/constants/volumePTshader.js
CHANGED
@@ -1,7 +1,7 @@
import { Texture, Vector2, Vector3, Vector4 } from "three";
import { Light, AREA_LIGHT, SKY_LIGHT } from "../Light.js";
/* babel-plugin-inline-import './shaders/pathtrace.frag' */
-
const pathTraceFragmentShader = "precision highp float;\nprecision highp int;\nprecision highp sampler2D;\nprecision highp sampler3D;\n\n#define PI (3.1415926535897932384626433832795)\n#define PI_OVER_2 (1.57079632679489661923)\n#define PI_OVER_4 (0.785398163397448309616)\n#define INV_PI (1.0/PI)\n#define INV_2_PI (0.5/PI)\n#define INV_4_PI (0.25/PI)\n\nconst vec3 BLACK = vec3(0,0,0);\nconst vec3 WHITE = vec3(1.0,1.0,1.0);\nconst int ShaderType_Brdf = 0;\nconst int ShaderType_Phase = 1;\nconst int ShaderType_Mixed = 2;\nconst float MAX_RAY_LEN = 1500000.0f;\n\nin vec2 vUv;\n\nstruct Camera {\n vec3 mFrom;\n vec3 mU, mV, mN;\n vec4 mScreen; // left, right, bottom, top\n vec2 mInvScreen; // 1/w, 1/h\n float mFocalDistance;\n float mApertureSize;\n float mIsOrtho; // 1 or 0\n};\n\nuniform Camera gCamera;\n\nstruct Light {\n float mTheta;\n float mPhi;\n float mWidth;\n float mHalfWidth;\n float mHeight;\n float mHalfHeight;\n float mDistance;\n float mSkyRadius;\n vec3 mP;\n vec3 mTarget;\n vec3 mN;\n vec3 mU;\n vec3 mV;\n float mArea;\n float mAreaPdf;\n vec3 mColor;\n vec3 mColorTop;\n vec3 mColorMiddle;\n vec3 mColorBottom;\n int mT;\n};\nconst int NUM_LIGHTS = 2;\nuniform Light gLights[2];\n\nuniform vec3 gClippedAaBbMin;\nuniform vec3 gClippedAaBbMax;\nuniform vec3 gVolCenter;\nuniform float gDensityScale;\nuniform float gStepSize;\nuniform float gStepSizeShadow;\nuniform sampler3D volumeTexture;\nuniform vec3 gInvAaBbMax;\nuniform int gNChannels;\nuniform int gShadingType;\nuniform vec3 gGradientDeltaX;\nuniform vec3 gGradientDeltaY;\nuniform vec3 gGradientDeltaZ;\nuniform float gInvGradientDelta;\nuniform float gGradientFactor;\nuniform float uShowLights;\nuniform vec3 flipVolume;\n\n// per channel\n// the luttexture is a 256x4 rgba texture\n// each row is a 256 element lookup table.\nuniform sampler2D gLutTexture;\nuniform vec4 gIntensityMax;\nuniform vec4 gIntensityMin;\nuniform float gOpacity[4];\nuniform vec3 gEmissive[4];\nuniform vec3 gDiffuse[4];\nuniform vec3 gSpecular[4];\nuniform float gGlossiness[4];\n\n// compositing / progressive render\nuniform float uFrameCounter;\nuniform float uSampleCounter;\nuniform vec2 uResolution;\nuniform sampler2D tPreviousTexture;\n\n// from iq https://www.shadertoy.com/view/4tXyWN\nfloat rand( inout uvec2 seed )\n{\n seed += uvec2(1);\n uvec2 q = 1103515245U * ( (seed >> 1U) ^ (seed.yx) );\n uint n = 1103515245U * ( (q.x) ^ (q.y >> 3U) );\n return float(n) * (1.0 / float(0xffffffffU));\n}\n\nvec3 XYZtoRGB(vec3 xyz) {\n return vec3(\n 3.240479f*xyz[0] - 1.537150f*xyz[1] - 0.498535f*xyz[2],\n -0.969256f*xyz[0] + 1.875991f*xyz[1] + 0.041556f*xyz[2],\n 0.055648f*xyz[0] - 0.204043f*xyz[1] + 1.057311f*xyz[2]\n );\n}\n\n// Used to convert from linear RGB to XYZ space\nconst mat3 RGB_2_XYZ = (mat3(\n 0.4124564, 0.3575761, 0.1804375,\n 0.2126729, 0.7151522, 0.0721750,\n 0.0193339, 0.1191920, 0.9503041\n));\nvec3 RGBtoXYZ(vec3 rgb) {\n return rgb * RGB_2_XYZ;\n}\n\nvec3 getUniformSphereSample(in vec2 U)\n{\n float z = 1.f - 2.f * U.x;\n float r = sqrt(max(0.f, 1.f - z*z));\n float phi = 2.f * PI * U.y;\n float x = r * cos(phi);\n float y = r * sin(phi);\n return vec3(x, y, z);\n}\n\nfloat SphericalPhi(in vec3 Wl)\n{\n float p = atan(Wl.z, Wl.x);\n return (p < 0.f) ? 
p + 2.f * PI : p;\n}\n\nfloat SphericalTheta(in vec3 Wl)\n{\n return acos(clamp(Wl.y, -1.f, 1.f));\n}\n\nbool SameHemisphere(in vec3 Ww1, in vec3 Ww2)\n{\n return (Ww1.z * Ww2.z) > 0.0f;\n}\n\nvec2 getConcentricDiskSample(in vec2 U)\n{\n float r, theta;\n // Map 0..1 to -1..1\n float sx = 2.0 * U.x - 1.0;\n float sy = 2.0 * U.y - 1.0;\n\n // Map square to (r,theta)\n\n // Handle degeneracy at the origin\n if (sx == 0.0 && sy == 0.0)\n {\n return vec2(0.0f, 0.0f);\n }\n\n // quadrants of disk\n if (sx >= -sy)\n {\n if (sx > sy)\n {\n r = sx;\n if (sy > 0.0)\n theta = sy/r;\n else\n theta = 8.0f + sy/r;\n }\n else\n {\n r = sy;\n theta = 2.0f - sx/r;\n }\n }\n else\n {\n if (sx <= sy)\n {\n r = -sx;\n theta = 4.0f - sy/r;\n }\n else\n {\n r = -sy;\n theta = 6.0f + sx/r;\n }\n }\n\n theta *= PI_OVER_4;\n\n return vec2(r*cos(theta), r*sin(theta));\n}\n\nvec3 getCosineWeightedHemisphereSample(in vec2 U)\n{\n vec2 ret = getConcentricDiskSample(U);\n return vec3(ret.x, ret.y, sqrt(max(0.f, 1.f - ret.x * ret.x - ret.y * ret.y)));\n}\n\nstruct Ray {\n vec3 m_O;\n vec3 m_D;\n float m_MinT, m_MaxT;\n};\n\nvec3 rayAt(Ray r, float t) {\n return r.m_O + t*r.m_D;\n}\n\nRay GenerateCameraRay(in Camera cam, in vec2 Pixel, in vec2 ApertureRnd)\n{\n // negating ScreenPoint.y flips the up/down direction. depends on whether you want pixel 0 at top or bottom\n // we could also have flipped mScreen and mInvScreen, or cam.mV?\n vec2 ScreenPoint = vec2(\n cam.mScreen.x + (cam.mInvScreen.x * Pixel.x),\n cam.mScreen.z + (cam.mInvScreen.y * Pixel.y)\n );\n vec3 dxy = (ScreenPoint.x * cam.mU) + (-ScreenPoint.y * cam.mV);\n\n // orthographic camera ray: start at (camera pos + screen point), go in direction N\n // perspective camera ray: start at camera pos, go in direction (N + screen point)\n vec3 RayO = cam.mFrom + cam.mIsOrtho * dxy;\n vec3 RayD = normalize(cam.mN + (1.0 - cam.mIsOrtho) * dxy);\n\n if (cam.mApertureSize != 0.0f)\n {\n vec2 LensUV = cam.mApertureSize * getConcentricDiskSample(ApertureRnd);\n\n vec3 LI = cam.mU * LensUV.x + cam.mV * LensUV.y;\n RayO += LI;\n RayD = normalize((RayD * cam.mFocalDistance) - LI);\n }\n\n return Ray(RayO, RayD, 0.0, MAX_RAY_LEN);\n}\n\nbool IntersectBox(in Ray R, out float pNearT, out float pFarT)\n{\n vec3 invR\t\t= vec3(1.0f, 1.0f, 1.0f) / R.m_D;\n vec3 bottomT\t\t= invR * (vec3(gClippedAaBbMin.x, gClippedAaBbMin.y, gClippedAaBbMin.z) - R.m_O);\n vec3 topT\t\t= invR * (vec3(gClippedAaBbMax.x, gClippedAaBbMax.y, gClippedAaBbMax.z) - R.m_O);\n vec3 minT\t\t= min(topT, bottomT);\n vec3 maxT\t\t= max(topT, bottomT);\n float largestMinT = max(max(minT.x, minT.y), max(minT.x, minT.z));\n float smallestMaxT = min(min(maxT.x, maxT.y), min(maxT.x, maxT.z));\n\n pNearT = largestMinT;\n pFarT\t= smallestMaxT;\n\n return smallestMaxT > largestMinT;\n}\n\n// assume volume is centered at 0,0,0 so p spans -bounds to + bounds\n// transform p to range from 0,0,0 to 1,1,1 for volume texture sampling.\n// optionally invert axes\nvec3 PtoVolumeTex(vec3 p) {\n vec3 uvw = (p - gVolCenter) * gInvAaBbMax + vec3(0.5, 0.5, 0.5);\n // if flipVolume = 1, uvw is unchanged.\n // if flipVolume = -1, uvw = 1 - uvw\n uvw = (flipVolume*(uvw - 0.5) + 0.5);\n return uvw;\n}\n\nconst float UINT8_MAX = 1.0;//255.0;\n\n// strategy: sample up to 4 channels, and take the post-LUT maximum intensity as the channel that wins\n// we will return the unmapped raw intensity value from the volume so that other luts can be applied again later.\nfloat GetNormalizedIntensityMax4ch(in vec3 P, out int ch)\n{\n vec4 
intensity = UINT8_MAX * texture(volumeTexture, PtoVolumeTex(P));\n\n //intensity = (intensity - gIntensityMin) / (gIntensityMax - gIntensityMin);\n vec4 ilut = vec4(0.0, 0.0, 0.0, 0.0);\n // w in the lut texture is \"opacity\"\n ilut.x = texture(gLutTexture, vec2(intensity.x, 0.5/4.0)).w / 255.0;\n ilut.y = texture(gLutTexture, vec2(intensity.y, 1.5/4.0)).w / 255.0;\n ilut.z = texture(gLutTexture, vec2(intensity.z, 2.5/4.0)).w / 255.0;\n ilut.w = texture(gLutTexture, vec2(intensity.w, 3.5/4.0)).w / 255.0;\n\n float maxIn = 0.0;\n float iOut = 0.0;\n ch = 0;\n for (int i = 0; i < min(gNChannels, 4); ++i) {\n if (ilut[i] > maxIn) {\n maxIn = ilut[i];\n ch = i;\n iOut = intensity[i];\n }\n }\n\n //return maxIn;\n return iOut;\n}\n\nfloat GetNormalizedIntensity4ch(vec3 P, int ch)\n{\n vec4 intensity = UINT8_MAX * texture(volumeTexture, PtoVolumeTex(P));\n // select channel\n float intensityf = intensity[ch];\n //intensityf = (intensityf - gIntensityMin[ch]) / (gIntensityMax[ch] - gIntensityMin[ch]);\n //intensityf = texture(gLutTexture, vec2(intensityf, (0.5+float(ch))/4.0)).x;\n\n return intensityf;\n}\n\n// note that gInvGradientDelta is maxpixeldim of volume\n// gGradientDeltaX,Y,Z is 1/X,Y,Z of volume\nvec3 Gradient4ch(vec3 P, int ch)\n{\n vec3 Gradient;\n\n Gradient.x = (GetNormalizedIntensity4ch(P + (gGradientDeltaX), ch) - GetNormalizedIntensity4ch(P - (gGradientDeltaX), ch)) * gInvGradientDelta;\n Gradient.y = (GetNormalizedIntensity4ch(P + (gGradientDeltaY), ch) - GetNormalizedIntensity4ch(P - (gGradientDeltaY), ch)) * gInvGradientDelta;\n Gradient.z = (GetNormalizedIntensity4ch(P + (gGradientDeltaZ), ch) - GetNormalizedIntensity4ch(P - (gGradientDeltaZ), ch)) * gInvGradientDelta;\n\n return Gradient;\n}\n\nfloat GetOpacity(float NormalizedIntensity, int ch)\n{\n // apply lut\n float o = texture(gLutTexture, vec2(NormalizedIntensity, (0.5+float(ch))/4.0)).w / 255.0;\n float Intensity = o * gOpacity[ch];\n return Intensity;\n}\n\nvec3 GetEmissionN(float NormalizedIntensity, int ch)\n{\n return gEmissive[ch];\n}\n\nvec3 GetDiffuseN(float NormalizedIntensity, int ch)\n{\n vec4 col = texture(gLutTexture, vec2(NormalizedIntensity, (0.5+float(ch))/4.0));\n //vec3 col = vec3(1.0, 1.0, 1.0);\n return col.xyz * gDiffuse[ch];\n}\n\nvec3 GetSpecularN(float NormalizedIntensity, int ch)\n{\n return gSpecular[ch];\n}\n\nfloat GetGlossinessN(float NormalizedIntensity, int ch)\n{\n return gGlossiness[ch];\n}\n\n// a bsdf sample, a sample on a light source, and a randomly chosen light index\nstruct LightingSample {\n float m_bsdfComponent;\n vec2 m_bsdfDir;\n vec2 m_lightPos;\n float m_lightComponent;\n float m_LightNum;\n};\n\nLightingSample LightingSample_LargeStep(inout uvec2 seed) {\n return LightingSample(\n rand(seed),\n vec2(rand(seed), rand(seed)),\n vec2(rand(seed), rand(seed)),\n rand(seed),\n rand(seed)\n );\n}\n\n// return a color xyz\nvec3 Light_Le(in Light light, in vec2 UV)\n{\n if (light.mT == 0)\n return RGBtoXYZ(light.mColor) / light.mArea;\n\n if (light.mT == 1)\n {\n if (UV.y > 0.0f)\n return RGBtoXYZ(mix(light.mColorMiddle, light.mColorTop, abs(UV.y)));\n else\n return RGBtoXYZ(mix(light.mColorMiddle, light.mColorBottom, abs(UV.y)));\n }\n\n return BLACK;\n}\n\n// return a color xyz\nvec3 Light_SampleL(in Light light, in vec3 P, out Ray Rl, out float Pdf, in LightingSample LS)\n{\n vec3 L = BLACK;\n Pdf = 0.0;\n vec3 Ro = vec3(0,0,0), Rd = vec3(0,0,1);\n if (light.mT == 0)\n {\n Ro = (light.mP + ((-0.5f + LS.m_lightPos.x) * light.mWidth * light.mU) + ((-0.5f + LS.m_lightPos.y) * 
light.mHeight * light.mV));\n Rd = normalize(P - Ro);\n L = dot(Rd, light.mN) > 0.0f ? Light_Le(light, vec2(0.0f)) : BLACK;\n Pdf = abs(dot(Rd, light.mN)) > 0.0f ? dot(P-Ro, P-Ro) / (abs(dot(Rd, light.mN)) * light.mArea) : 0.0f;\n }\n else if (light.mT == 1)\n {\n Ro = light.mP + light.mSkyRadius * getUniformSphereSample(LS.m_lightPos);\n Rd = normalize(P - Ro);\n L = Light_Le(light, vec2(1.0f) - 2.0f * LS.m_lightPos);\n Pdf = pow(light.mSkyRadius, 2.0f) / light.mArea;\n }\n\n Rl = Ray(Ro, Rd, 0.0f, length(P - Ro));\n\n return L;\n}\n\n// Intersect ray with light\nbool Light_Intersect(Light light, inout Ray R, out float T, out vec3 L, out float pPdf)\n{\n if (light.mT == 0)\n {\n // Compute projection\n float DotN = dot(R.m_D, light.mN);\n\n // Ray is coplanar with light surface\n if (DotN >= 0.0f)\n return false;\n\n // Compute hit distance\n T = (-light.mDistance - dot(R.m_O, light.mN)) / DotN;\n\n // Intersection is in ray's negative direction\n if (T < R.m_MinT || T > R.m_MaxT)\n return false;\n\n // Determine position on light\n vec3 Pl = rayAt(R, T);\n\n // Vector from point on area light to center of area light\n vec3 Wl = Pl - light.mP;\n\n // Compute texture coordinates\n vec2 UV = vec2(dot(Wl, light.mU), dot(Wl, light.mV));\n\n // Check if within bounds of light surface\n if (UV.x > light.mHalfWidth || UV.x < -light.mHalfWidth || UV.y > light.mHalfHeight || UV.y < -light.mHalfHeight)\n return false;\n\n R.m_MaxT = T;\n\n //pUV = UV;\n\n if (DotN < 0.0f)\n L = RGBtoXYZ(light.mColor) / light.mArea;\n else\n L = BLACK;\n\n pPdf = dot(R.m_O-Pl, R.m_O-Pl) / (DotN * light.mArea);\n\n return true;\n }\n\n else if (light.mT == 1)\n {\n T = light.mSkyRadius;\n\n // Intersection is in ray's negative direction\n if (T < R.m_MinT || T > R.m_MaxT)\n return false;\n\n R.m_MaxT = T;\n\n vec2 UV = vec2(SphericalPhi(R.m_D) * INV_2_PI, SphericalTheta(R.m_D) * INV_PI);\n\n L = Light_Le(light, vec2(1.0f,1.0f) - 2.0f * UV);\n\n pPdf = pow(light.mSkyRadius, 2.0f) / light.mArea;\n //pUV = UV;\n\n return true;\n }\n\n return false;\n}\n\nfloat Light_Pdf(in Light light, in vec3 P, in vec3 Wi)\n{\n vec3 L;\n vec2 UV;\n float Pdf = 1.0f;\n\n Ray Rl = Ray(P, Wi, 0.0f, 100000.0f);\n\n if (light.mT == 0)\n {\n float T = 0.0f;\n\n if (!Light_Intersect(light, Rl, T, L, Pdf))\n return 0.0f;\n\n return pow(T, 2.0f) / (abs(dot(light.mN, -Wi)) * light.mArea);\n }\n\n else if (light.mT == 1)\n {\n return pow(light.mSkyRadius, 2.0f) / light.mArea;\n }\n\n return 0.0f;\n}\n\nstruct VolumeShader {\n int m_Type; // 0 = bsdf, 1 = phase\n\n vec3 m_Kd; // isotropic phase // xyz color\n vec3 m_R; // specular reflectance\n float m_Ior;\n float m_Exponent;\n vec3 m_Nn;\n vec3 m_Nu;\n vec3 m_Nv;\n};\n\n// return a xyz color\nvec3 ShaderPhase_F(in VolumeShader shader, in vec3 Wo, in vec3 Wi)\n{\n return shader.m_Kd * INV_PI;\n}\n\nfloat ShaderPhase_Pdf(in VolumeShader shader, in vec3 Wo, in vec3 Wi)\n{\n return INV_4_PI;\n}\n\nvec3 ShaderPhase_SampleF(in VolumeShader shader, in vec3 Wo, out vec3 Wi, out float Pdf, in vec2 U)\n{\n Wi\t= getUniformSphereSample(U);\n Pdf\t= ShaderPhase_Pdf(shader, Wo, Wi);\n\n return ShaderPhase_F(shader, Wo, Wi);\n}\n\n// return a xyz color\nvec3 Lambertian_F(in VolumeShader shader, in vec3 Wo, in vec3 Wi)\n{\n return shader.m_Kd * INV_PI;\n}\n\nfloat Lambertian_Pdf(in VolumeShader shader, in vec3 Wo, in vec3 Wi)\n{\n //return abs(Wi.z)*INV_PI;\n return SameHemisphere(Wo, Wi) ? 
abs(Wi.z) * INV_PI : 0.0f;\n}\n\n// return a xyz color\nvec3 Lambertian_SampleF(in VolumeShader shader, in vec3 Wo, out vec3 Wi, out float Pdf, in vec2 U)\n{\n Wi = getCosineWeightedHemisphereSample(U);\n\n if (Wo.z < 0.0f)\n Wi.z *= -1.0f;\n\n Pdf = Lambertian_Pdf(shader, Wo, Wi);\n\n return Lambertian_F(shader, Wo, Wi);\n}\n\nvec3 SphericalDirection(in float SinTheta, in float CosTheta, in float Phi)\n{\n return vec3(SinTheta * cos(Phi), SinTheta * sin(Phi), CosTheta);\n}\n\nvoid Blinn_SampleF(in VolumeShader shader, in vec3 Wo, out vec3 Wi, out float Pdf, in vec2 U)\n{\n // Compute sampled half-angle vector wh for Blinn distribution\n float costheta = pow(U.x, 1.f / (shader.m_Exponent+1.0));\n float sintheta = sqrt(max(0.f, 1.f - costheta*costheta));\n float phi = U.y * 2.f * PI;\n\n vec3 wh = SphericalDirection(sintheta, costheta, phi);\n\n if (!SameHemisphere(Wo, wh))\n wh = -wh;\n\n // Compute incident direction by reflecting about wh\n Wi = -Wo + 2.f * dot(Wo, wh) * wh;\n\n // Compute PDF for wi from Blinn distribution\n float blinn_pdf = ((shader.m_Exponent + 1.f) * pow(costheta, shader.m_Exponent)) / (2.f * PI * 4.f * dot(Wo, wh));\n\n if (dot(Wo, wh) <= 0.f)\n blinn_pdf = 0.f;\n\n Pdf = blinn_pdf;\n}\n\nfloat Blinn_D(in VolumeShader shader, in vec3 wh)\n{\n float costhetah = abs(wh.z);//AbsCosTheta(wh);\n return (shader.m_Exponent+2.0) * INV_2_PI * pow(costhetah, shader.m_Exponent);\n}\nfloat Microfacet_G(in VolumeShader shader, in vec3 wo, in vec3 wi, in vec3 wh)\n{\n float NdotWh = abs(wh.z);//AbsCosTheta(wh);\n float NdotWo = abs(wo.z);//AbsCosTheta(wo);\n float NdotWi = abs(wi.z);//AbsCosTheta(wi);\n float WOdotWh = abs(dot(wo, wh));\n\n return min(1.f, min((2.f * NdotWh * NdotWo / WOdotWh), (2.f * NdotWh * NdotWi / WOdotWh)));\n}\n\nvec3 Microfacet_F(in VolumeShader shader, in vec3 wo, in vec3 wi)\n{\n float cosThetaO = abs(wo.z);//AbsCosTheta(wo);\n float cosThetaI = abs(wi.z);//AbsCosTheta(wi);\n\n if (cosThetaI == 0.f || cosThetaO == 0.f)\n return BLACK;\n\n vec3 wh = wi + wo;\n\n if (wh.x == 0. && wh.y == 0. 
&& wh.z == 0.)\n return BLACK;\n\n wh = normalize(wh);\n float cosThetaH = dot(wi, wh);\n\n vec3 F = WHITE;//m_Fresnel.Evaluate(cosThetaH);\n\n return shader.m_R * Blinn_D(shader, wh) * Microfacet_G(shader, wo, wi, wh) * F / (4.f * cosThetaI * cosThetaO);\n}\n\nvec3 ShaderBsdf_WorldToLocal(in VolumeShader shader, in vec3 W)\n{\n return vec3(dot(W, shader.m_Nu), dot(W, shader.m_Nv), dot(W, shader.m_Nn));\n}\n\nvec3 ShaderBsdf_LocalToWorld(in VolumeShader shader, in vec3 W)\n{\n return vec3(\tshader.m_Nu.x * W.x + shader.m_Nv.x * W.y + shader.m_Nn.x * W.z,\n shader.m_Nu.y * W.x + shader.m_Nv.y * W.y + shader.m_Nn.y * W.z,\n shader.m_Nu.z * W.x + shader.m_Nv.z * W.y + shader.m_Nn.z * W.z);\n}\n\nfloat Blinn_Pdf(in VolumeShader shader, in vec3 Wo, in vec3 Wi)\n{\n vec3 wh = normalize(Wo + Wi);\n\n float costheta = abs(wh.z);//AbsCosTheta(wh);\n // Compute PDF for wi from Blinn distribution\n float blinn_pdf = ((shader.m_Exponent + 1.f) * pow(costheta, shader.m_Exponent)) / (2.f * PI * 4.f * dot(Wo, wh));\n\n if (dot(Wo, wh) <= 0.0f)\n blinn_pdf = 0.0f;\n\n return blinn_pdf;\n}\n\nvec3 Microfacet_SampleF(in VolumeShader shader, in vec3 wo, out vec3 wi, out float Pdf, in vec2 U)\n{\n Blinn_SampleF(shader, wo, wi, Pdf, U);\n\n if (!SameHemisphere(wo, wi))\n return BLACK;\n\n return Microfacet_F(shader, wo, wi);\n}\n\nfloat Microfacet_Pdf(in VolumeShader shader, in vec3 wo, in vec3 wi)\n{\n if (!SameHemisphere(wo, wi))\n return 0.0f;\n\n return Blinn_Pdf(shader, wo, wi);\n}\n\n// return a xyz color\nvec3 ShaderBsdf_F(in VolumeShader shader, in vec3 Wo, in vec3 Wi)\n{\n vec3 Wol = ShaderBsdf_WorldToLocal(shader, Wo);\n vec3 Wil = ShaderBsdf_WorldToLocal(shader, Wi);\n\n vec3 R = vec3(0,0,0);\n\n R += Lambertian_F(shader, Wol, Wil);\n R += Microfacet_F(shader, Wol, Wil);\n\n return R;\n}\n\nfloat ShaderBsdf_Pdf(in VolumeShader shader, in vec3 Wo, in vec3 Wi)\n{\n vec3 Wol = ShaderBsdf_WorldToLocal(shader, Wo);\n vec3 Wil = ShaderBsdf_WorldToLocal(shader, Wi);\n\n float Pdf = 0.0f;\n\n Pdf += Lambertian_Pdf(shader, Wol, Wil);\n Pdf += Microfacet_Pdf(shader, Wol, Wil);\n\n return Pdf;\n}\n\n\nvec3 ShaderBsdf_SampleF(in VolumeShader shader, in LightingSample S, in vec3 Wo, out vec3 Wi, out float Pdf, in vec2 U)\n{\n vec3 Wol = ShaderBsdf_WorldToLocal(shader, Wo);\n vec3 Wil = vec3(0,0,0);\n\n vec3 R = vec3(0,0,0);\n\n if (S.m_bsdfComponent <= 0.5f)\n {\n Lambertian_SampleF(shader, Wol, Wil, Pdf, S.m_bsdfDir);\n }\n else\n {\n Microfacet_SampleF(shader, Wol, Wil, Pdf, S.m_bsdfDir);\n }\n\n Pdf += Lambertian_Pdf(shader, Wol, Wil);\n Pdf += Microfacet_Pdf(shader, Wol, Wil);\n\n R += Lambertian_F(shader, Wol, Wil);\n R += Microfacet_F(shader, Wol, Wil);\n\n Wi = ShaderBsdf_LocalToWorld(shader, Wil);\n\n //return vec3(1,1,1);\n return R;\n}\n\n// return a xyz color\nvec3 Shader_F(in VolumeShader shader, in vec3 Wo, in vec3 Wi)\n{\n if (shader.m_Type == 0) {\n return ShaderBsdf_F(shader, Wo, Wi);\n }\n else {\n return ShaderPhase_F(shader, Wo, Wi);\n }\n}\n\nfloat Shader_Pdf(in VolumeShader shader, in vec3 Wo, in vec3 Wi)\n{\n if (shader.m_Type == 0) {\n return ShaderBsdf_Pdf(shader, Wo, Wi);\n }\n else {\n return ShaderPhase_Pdf(shader, Wo, Wi);\n }\n}\n\nvec3 Shader_SampleF(in VolumeShader shader, in LightingSample S, in vec3 Wo, out vec3 Wi, out float Pdf, in vec2 U)\n{\n //return vec3(1,0,0);\n if (shader.m_Type == 0) {\n return ShaderBsdf_SampleF(shader, S, Wo, Wi, Pdf, U);\n }\n else {\n return ShaderPhase_SampleF(shader, Wo, Wi, Pdf, U);\n }\n}\n\n\nbool IsBlack(in vec3 v) {\n return (v.x==0.0 && v.y 
== 0.0 && v.z == 0.0);\n}\n\nfloat PowerHeuristic(float nf, float fPdf, float ng, float gPdf)\n{\n float f = nf * fPdf;\n float g = ng * gPdf;\n // The power heuristic is Veach's MIS balance heuristic except each component is being squared\n // balance heuristic would be f/(f+g) ...?\n return (f * f) / (f * f + g * g);\n}\n\nfloat MISContribution(float pdf1, float pdf2)\n{\n return PowerHeuristic(1.0f, pdf1, 1.0f, pdf2);\n}\n\n// \"shadow ray\" using gStepSizeShadow, test whether it can exit the volume or not\nbool DoesSecondaryRayScatterInVolume(inout Ray R, inout uvec2 seed)\n{\n float MinT;\n float MaxT;\n vec3 Ps;\n\n if (!IntersectBox(R, MinT, MaxT))\n return false;\n\n MinT = max(MinT, R.m_MinT);\n MaxT = min(MaxT, R.m_MaxT);\n\n // delta (Woodcock) tracking\n float S\t= -log(rand(seed)) / gDensityScale;\n float Sum = 0.0f;\n float SigmaT = 0.0f;\n\n MinT += rand(seed) * gStepSizeShadow;\n int ch = 0;\n float intensity = 0.0;\n while (Sum < S)\n {\n Ps = rayAt(R, MinT); // R.m_O + MinT * R.m_D;\n\n if (MinT > MaxT)\n return false;\n\n intensity = GetNormalizedIntensityMax4ch(Ps, ch);\n SigmaT = gDensityScale * GetOpacity(intensity, ch);\n\n Sum += SigmaT * gStepSizeShadow;\n MinT += gStepSizeShadow;\n }\n\n return true;\n}\n\nint GetNearestLight(Ray R, out vec3 oLightColor, out vec3 Pl, out float oPdf)\n{\n int hit = -1;\n float T = 0.0f;\n Ray rayCopy = R;\n float pdf = 0.0f;\n\n for (int i = 0; i < 2; i++)\n {\n if (Light_Intersect(gLights[i], rayCopy, T, oLightColor, pdf))\n {\n Pl = rayAt(R, T);\n hit = i;\n }\n }\n oPdf = pdf;\n\n return hit;\n}\n\n// return a XYZ color\n// Wo is direction from scatter point out toward incident ray direction\n\n// Wi goes toward light sample and is not necessarily perfect reflection of Wo\n// ^Wi ^N ^Wo\n// \\\\ | //\n// \\\\ | //\n// \\\\ | //\n// \\\\ | //\n// \\\\|// Pe = volume sample where scattering occurs\n// ---------\nvec3 EstimateDirectLight(int shaderType, float Density, int ch, in Light light, in LightingSample LS, in vec3 Wo, in vec3 Pe, in vec3 N, inout uvec2 seed)\n{\n vec3 Ld = BLACK, Li = BLACK, F = BLACK;\n\n vec3 diffuse = GetDiffuseN(Density, ch);\n vec3 specular = GetSpecularN(Density, ch);\n float glossiness = GetGlossinessN(Density, ch);\n\n // can N and Wo be coincident????\n vec3 nu = normalize(cross(N, Wo));\n vec3 nv = normalize(cross(N, nu));\n\n // the IoR here is hard coded... 
and unused!!!!\n VolumeShader Shader = VolumeShader(shaderType, RGBtoXYZ(diffuse), RGBtoXYZ(specular), 2.5f, glossiness, N, nu, nv);\n\n float LightPdf = 1.0f, ShaderPdf = 1.0f;\n\n Ray Rl = Ray(vec3(0,0,0), vec3(0,0,1.0), 0.0, MAX_RAY_LEN);\n // Rl is ray from light toward Pe in volume, with a max traversal of the distance from Pe to Light sample pos.\n Li = Light_SampleL(light, Pe, Rl, LightPdf, LS);\n\n // Wi: negate ray direction: from volume scatter point toward light...?\n vec3 Wi = -Rl.m_D, P = vec3(0,0,0);\n\n // we will calculate two lighting contributions and combine them by MIS.\n\n F = Shader_F(Shader,Wo, Wi);\n\n ShaderPdf = Shader_Pdf(Shader, Wo, Wi);\n\n // get a lighting contribution along Rl; see if Rl would scatter in the volume or not\n if (!IsBlack(Li) && (ShaderPdf > 0.0f) && (LightPdf > 0.0f) && !DoesSecondaryRayScatterInVolume(Rl, seed))\n {\n // ray from light can see through volume to Pe!\n\n float dotProd = 1.0;\n if (shaderType == ShaderType_Brdf){\n\n // (use abs or clamp here?)\n dotProd = abs(dot(Wi, N));\n }\n Ld += F * Li * dotProd * MISContribution(LightPdf, ShaderPdf) / LightPdf;\n\n }\n\n // get a lighting contribution by sampling nearest light from the scattering point\n F = Shader_SampleF(Shader, LS, Wo, Wi, ShaderPdf, LS.m_bsdfDir);\n if (!IsBlack(F) && (ShaderPdf > 0.0f))\n {\n vec3 Pl = vec3(0,0,0);\n int n = GetNearestLight(Ray(Pe, Wi, 0.0f, 1000000.0f), Li, Pl, LightPdf);\n if (n > -1)\n {\n Light pLight = gLights[n];\n LightPdf = Light_Pdf(pLight, Pe, Wi);\n\n if ((LightPdf > 0.0f) && !IsBlack(Li)) {\n Ray rr = Ray(Pl, normalize(Pe - Pl), 0.0f, length(Pe - Pl));\n if (!DoesSecondaryRayScatterInVolume(rr, seed))\n {\n float dotProd = 1.0;\n if (shaderType == ShaderType_Brdf){\n\n // (use abs or clamp here?)\n dotProd = abs(dot(Wi, N));\n }\n // note order of MIS params is swapped\n Ld += F * Li * dotProd * MISContribution(ShaderPdf, LightPdf) / ShaderPdf;\n }\n\n }\n }\n }\n\n return Ld;\n\n}\n\n// return a linear xyz color\nvec3 UniformSampleOneLight(int shaderType, float Density, int ch, in vec3 Wo, in vec3 Pe, in vec3 N, inout uvec2 seed)\n{\n //if (NUM_LIGHTS == 0)\n // return BLACK;\n\n // select a random light, a random 2d sample on light, and a random 2d sample on brdf\n LightingSample LS = LightingSample_LargeStep(seed);\n\n int WhichLight = int(floor(LS.m_LightNum * float(NUM_LIGHTS)));\n\n Light light = gLights[WhichLight];\n\n return float(NUM_LIGHTS) * EstimateDirectLight(shaderType, Density, ch, light, LS, Wo, Pe, N, seed);\n\n}\n\nbool SampleScatteringEvent(inout Ray R, inout uvec2 seed, out vec3 Ps)\n{\n float MinT;\n float MaxT;\n\n if (!IntersectBox(R, MinT, MaxT))\n return false;\n\n MinT = max(MinT, R.m_MinT);\n MaxT = min(MaxT, R.m_MaxT);\n\n // delta (Woodcock) tracking\n\n // notes, not necessarily coherent:\n // ray march along the ray's projected path and keep an average sigmaT value.\n // The distance is weighted by the intensity at each ray step sample. 
High intensity increases the apparent distance.\n // When the distance has become greater than the average sigmaT value given by -log(RandomFloat[0, 1]) / averageSigmaT\n // then that would be considered the interaction position.\n\n // sigmaT = sigmaA + sigmaS = absorption coeff + scattering coeff = extinction coeff\n\n // Beer-Lambert law: transmittance T(t) = exp(-sigmaT*t) where t is a distance!\n\n // importance sampling the exponential function to produce a free path distance S\n // the PDF is p(t) = sigmaT * exp(-sigmaT * t)\n // In a homogeneous volume,\n // S is the free-path distance = -ln(1-zeta)/sigmaT where zeta is a random variable\n // density scale = 0 => S --> 0..inf. Low density means randomly sized ray paths\n // density scale = inf => S --> 0. High density means short ray paths!\n\n // note that ln(x:0..1) is negative\n\n // here gDensityScale represents sigmaMax, a majorant of sigmaT\n // it is a parameter that should be set as close to the max extinction coefficient as possible.\n float S\t= -log(rand(seed)) / gDensityScale;\n\n float Sum\t\t= 0.0f;\n float SigmaT\t= 0.0f; // accumulated extinction along ray march\n\n // start: take one step now.\n MinT += rand(seed) * gStepSize;\n\n int ch = 0;\n float intensity = 0.0;\n\n // ray march until we have traveled S (or hit the maxT of the ray)\n while (Sum < S)\n {\n Ps = rayAt(R, MinT); // R.m_O + MinT * R.m_D;\n\n // if we exit the volume with no scattering\n if (MinT > MaxT)\n return false;\n\n intensity = GetNormalizedIntensityMax4ch(Ps, ch);\n SigmaT = gDensityScale * GetOpacity(intensity, ch);\n\n Sum += SigmaT * gStepSize;\n MinT += gStepSize;\n }\n\n // at this time, MinT - original MinT is the T transmission distance before a scatter event.\n // Ps is the point\n\n return true;\n}\n\n\nvec4 CalculateRadiance(inout uvec2 seed) {\n float r = rand(seed);\n //return vec4(r,0,0,1);\n\n vec3 Lv = BLACK, Li = BLACK;\n\n //Ray Re = Ray(vec3(0,0,0), vec3(0,0,1), 0.0, MAX_RAY_LEN);\n\n vec2 UV = vUv*uResolution + vec2(rand(seed), rand(seed));\n\n Ray Re = GenerateCameraRay(gCamera, UV, vec2(rand(seed), rand(seed)));\n\n //return vec4(vUv, 0.0, 1.0);\n //return vec4(0.5*(Re.m_D + 1.0), 1.0);\n //return vec4(Re.m_D, 1.0);\n\n //Re.m_MinT = 0.0f;\n //Re.m_MaxT = MAX_RAY_LEN;\n\n vec3 Pe = vec3(0,0,0), Pl = vec3(0,0,0);\n float lpdf = 0.0;\n\n float alpha = 0.0;\n // find point Pe along ray Re\n if (SampleScatteringEvent(Re, seed, Pe))\n {\n alpha = 1.0;\n // is there a light between Re.m_O and Pe? 
(ray's maxT is distance to Pe)\n // (test to see if area light was hit before volume.)\n int i = GetNearestLight(Ray(Re.m_O, Re.m_D, 0.0f, length(Pe - Re.m_O)), Li, Pl, lpdf);\n if (i > -1)\n {\n // set sample pixel value in frame estimate (prior to accumulation)\n return vec4(Li, 1.0);\n }\n\n int ch = 0;\n float D = GetNormalizedIntensityMax4ch(Pe, ch);\n\n // emission from volume\n Lv += RGBtoXYZ(GetEmissionN(D, ch));\n\n vec3 gradient = Gradient4ch(Pe, ch);\n // send ray out from Pe toward light\n switch (gShadingType)\n {\n case ShaderType_Brdf:\n {\n Lv += UniformSampleOneLight(ShaderType_Brdf, D, ch, normalize(-Re.m_D), Pe, normalize(gradient), seed);\n break;\n }\n\n case ShaderType_Phase:\n {\n Lv += 0.5f * UniformSampleOneLight(ShaderType_Phase, D, ch, normalize(-Re.m_D), Pe, normalize(gradient), seed);\n break;\n }\n\n case ShaderType_Mixed:\n {\n //const float GradMag = GradientMagnitude(Pe, volumedata.gradientVolumeTexture[ch]) * (1.0/volumedata.intensityMax[ch]);\n float GradMag = length(gradient);\n float PdfBrdf = (1.0f - exp(-gGradientFactor * GradMag));\n\n vec3 cls; // xyz color\n if (rand(seed) < PdfBrdf) {\n cls = UniformSampleOneLight(ShaderType_Brdf, D, ch, normalize(-Re.m_D), Pe, normalize(gradient), seed);\n }\n else {\n cls = 0.5f * UniformSampleOneLight(ShaderType_Phase, D, ch, normalize(-Re.m_D), Pe, normalize(gradient), seed);\n }\n\n Lv += cls;\n\n break;\n }\n }\n }\n else\n {\n // background color:\n // set Lv to a selected color based on environment light source?\n // if (uShowLights > 0.0) {\n // int n = GetNearestLight(Ray(Re.m_O, Re.m_D, 0.0f, 1000000.0f), Li, Pl, lpdf);\n // if (n > -1)\n // Lv = Li;\n // }\n //Lv = vec3(r,0,0);\n }\n\n // set sample pixel value in frame estimate (prior to accumulation)\n\n return vec4(Lv, alpha);\n}\n\nvec4 CumulativeMovingAverage(vec4 A, vec4 Ax, float N)\n{\n return A + ((Ax - A) / max((N), 1.0f));\n}\n\nvoid main()\n{\n // seed for rand(seed) function\n uvec2 seed = uvec2(uFrameCounter, uFrameCounter + 1.0) * uvec2(gl_FragCoord);\n\n // perform path tracing and get resulting pixel color\n vec4 pixelColor = CalculateRadiance( seed );\n\n vec4 previousColor = texture(tPreviousTexture, vUv);\n if (uSampleCounter < 1.0) {\n previousColor = vec4(0,0,0,0);\n }\n\n pc_fragColor = CumulativeMovingAverage(previousColor, pixelColor, uSampleCounter);\n}\n";
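The comment block in SampleScatteringEvent above (kept verbatim in the reformatted shader below) spells out the free-path sampling scheme: draw a target optical depth S = -log(rand())/gDensityScale, then march, accumulating sigmaT * step until the sum exceeds S. For reference, a minimal TypeScript sketch of that loop; `opacityAt` and the parameter names are illustrative stand-ins for the shader's LUT lookup, not part of the package API:

```ts
type Vec3 = [number, number, number];

// Sketch of the delta-tracking loop described in the shader comments.
// Assumptions: densityScale plays the role of the majorant sigma_max, and
// opacityAt() is a hypothetical stand-in for the shader's
// GetOpacity(GetNormalizedIntensityMax4ch(...)) lookup.
function sampleScatterDistance(
  origin: Vec3,
  dir: Vec3,
  minT: number,
  maxT: number,
  stepSize: number,
  densityScale: number,
  opacityAt: (p: Vec3) => number, // post-LUT opacity in 0..1
  rand: () => number
): number | null {
  // free-path target: -ln(xi) / sigma_max importance-samples exp(-sigma * t)
  const S = -Math.log(rand()) / densityScale;
  let sum = 0;
  let t = minT + rand() * stepSize; // jittered first step, as in the shader
  while (sum < S) {
    if (t > maxT) return null; // exited the volume without scattering
    const p: Vec3 = [origin[0] + t * dir[0], origin[1] + t * dir[1], origin[2] + t * dir[2]];
    const sigmaT = densityScale * opacityAt(p); // extinction at this sample
    sum += sigmaT * stepSize; // accumulate optical depth
    t += stepSize;
  }
  return t; // parametric distance of the scattering event
}
```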
|
|
4
|
+
const pathTraceFragmentShader = "precision highp float;\nprecision highp int;\nprecision highp sampler2D;\nprecision highp sampler3D;\n\n#define PI (3.1415926535897932384626433832795)\n#define PI_OVER_2 (1.57079632679489661923)\n#define PI_OVER_4 (0.785398163397448309616)\n#define INV_PI (1.0/PI)\n#define INV_2_PI (0.5/PI)\n#define INV_4_PI (0.25/PI)\n\nconst vec3 BLACK = vec3(0, 0, 0);\nconst vec3 WHITE = vec3(1.0, 1.0, 1.0);\nconst int ShaderType_Brdf = 0;\nconst int ShaderType_Phase = 1;\nconst int ShaderType_Mixed = 2;\nconst float MAX_RAY_LEN = 1500000.0;\n\nin vec2 vUv;\n\nstruct Camera {\n vec3 mFrom;\n vec3 mU, mV, mN;\n vec4 mScreen; // left, right, bottom, top\n vec2 mInvScreen; // 1/w, 1/h\n float mFocalDistance;\n float mApertureSize;\n float mIsOrtho; // 1 or 0\n};\n\nuniform Camera gCamera;\n\nstruct Light {\n float mTheta;\n float mPhi;\n float mWidth;\n float mHalfWidth;\n float mHeight;\n float mHalfHeight;\n float mDistance;\n float mSkyRadius;\n vec3 mP;\n vec3 mTarget;\n vec3 mN;\n vec3 mU;\n vec3 mV;\n float mArea;\n float mAreaPdf;\n vec3 mColor;\n vec3 mColorTop;\n vec3 mColorMiddle;\n vec3 mColorBottom;\n int mT;\n};\nconst int NUM_LIGHTS = 2;\nuniform Light gLights[2];\n\nuniform vec3 gClippedAaBbMin;\nuniform vec3 gClippedAaBbMax;\nuniform vec3 gVolCenter;\nuniform float gDensityScale;\nuniform float gStepSize;\nuniform float gStepSizeShadow;\nuniform sampler3D volumeTexture;\nuniform vec3 gInvAaBbMax;\nuniform int gNChannels;\nuniform int gShadingType;\nuniform vec3 gGradientDeltaX;\nuniform vec3 gGradientDeltaY;\nuniform vec3 gGradientDeltaZ;\nuniform float gInvGradientDelta;\nuniform float gGradientFactor;\nuniform float uShowLights;\nuniform vec3 flipVolume;\n\n// per channel\n// the luttexture is a 256x4 rgba texture\n// each row is a 256 element lookup table.\nuniform sampler2D gLutTexture;\nuniform vec4 gIntensityMax;\nuniform vec4 gIntensityMin;\nuniform float gOpacity[4];\nuniform vec3 gEmissive[4];\nuniform vec3 gDiffuse[4];\nuniform vec3 gSpecular[4];\nuniform float gGlossiness[4];\n\n// compositing / progressive render\nuniform float uFrameCounter;\nuniform float uSampleCounter;\nuniform vec2 uResolution;\nuniform sampler2D tPreviousTexture;\n\n// from iq https://www.shadertoy.com/view/4tXyWN\nfloat rand(inout uvec2 seed) {\n seed += uvec2(1);\n uvec2 q = 1103515245U * ((seed >> 1U) ^ (seed.yx));\n uint n = 1103515245U * ((q.x) ^ (q.y >> 3U));\n return float(n) * (1.0 / float(0xffffffffU));\n}\n\nvec3 XYZtoRGB(vec3 xyz) {\n return vec3(\n 3.240479f*xyz[0] - 1.537150f*xyz[1] - 0.498535f*xyz[2],\n -0.969256f*xyz[0] + 1.875991f*xyz[1] + 0.041556f*xyz[2],\n 0.055648f*xyz[0] - 0.204043f*xyz[1] + 1.057311f*xyz[2]\n );\n}\n\n// Used to convert from linear RGB to XYZ space\nconst mat3 RGB_2_XYZ = (mat3(\n 0.4124564, 0.3575761, 0.1804375,\n 0.2126729, 0.7151522, 0.0721750,\n 0.0193339, 0.1191920, 0.9503041\n));\nvec3 RGBtoXYZ(vec3 rgb) {\n return rgb * RGB_2_XYZ;\n}\n\nvec3 getUniformSphereSample(in vec2 U) {\n float z = 1. - 2. * U.x;\n float r = sqrt(max(0., 1. - z * z));\n float phi = 2. * PI * U.y;\n float x = r * cos(phi);\n float y = r * sin(phi);\n return vec3(x, y, z);\n}\n\nfloat SphericalPhi(in vec3 Wl) {\n float p = atan(Wl.z, Wl.x);\n return (p < 0.) ? p + 2. 
* PI : p;\n}\n\nfloat SphericalTheta(in vec3 Wl) {\n return acos(clamp(Wl.y, -1., 1.));\n}\n\nbool SameHemisphere(in vec3 Ww1, in vec3 Ww2) {\n return (Ww1.z * Ww2.z) > 0.0;\n}\n\nvec2 getConcentricDiskSample(in vec2 U) {\n float r, theta;\n // Map 0..1 to -1..1\n float sx = 2.0 * U.x - 1.0;\n float sy = 2.0 * U.y - 1.0;\n\n // Map square to (r,theta)\n\n // Handle degeneracy at the origin\n if (sx == 0.0 && sy == 0.0) {\n return vec2(0.0, 0.0);\n }\n\n // quadrants of disk\n if (sx >= -sy) {\n if (sx > sy) {\n r = sx;\n if (sy > 0.0)\n theta = sy / r;\n else\n theta = 8.0 + sy / r;\n } else {\n r = sy;\n theta = 2.0 - sx / r;\n }\n } else {\n if (sx <= sy) {\n r = -sx;\n theta = 4.0 - sy / r;\n } else {\n r = -sy;\n theta = 6.0 + sx / r;\n }\n }\n\n theta *= PI_OVER_4;\n\n return vec2(r * cos(theta), r * sin(theta));\n}\n\nvec3 getCosineWeightedHemisphereSample(in vec2 U) {\n vec2 ret = getConcentricDiskSample(U);\n return vec3(ret.x, ret.y, sqrt(max(0., 1. - ret.x * ret.x - ret.y * ret.y)));\n}\n\nstruct Ray {\n vec3 m_O;\n vec3 m_D;\n float m_MinT, m_MaxT;\n};\n\nvec3 rayAt(Ray r, float t) {\n return r.m_O + t * r.m_D;\n}\n\nRay GenerateCameraRay(in Camera cam, in vec2 Pixel, in vec2 ApertureRnd) {\n // negating ScreenPoint.y flips the up/down direction. depends on whether you want pixel 0 at top or bottom\n // we could also have flipped mScreen and mInvScreen, or cam.mV?\n vec2 ScreenPoint = vec2(\n cam.mScreen.x + (cam.mInvScreen.x * Pixel.x),\n cam.mScreen.z + (cam.mInvScreen.y * Pixel.y)\n );\n vec3 dxy = (ScreenPoint.x * cam.mU) + (-ScreenPoint.y * cam.mV);\n\n // orthographic camera ray: start at (camera pos + screen point), go in direction N\n // perspective camera ray: start at camera pos, go in direction (N + screen point)\n vec3 RayO = cam.mFrom + cam.mIsOrtho * dxy;\n vec3 RayD = normalize(cam.mN + (1.0 - cam.mIsOrtho) * dxy);\n\n if (cam.mApertureSize != 0.0) {\n vec2 LensUV = cam.mApertureSize * getConcentricDiskSample(ApertureRnd);\n\n vec3 LI = cam.mU * LensUV.x + cam.mV * LensUV.y;\n RayO += LI;\n RayD = normalize((RayD * cam.mFocalDistance) - LI);\n }\n\n return Ray(RayO, RayD, 0.0, MAX_RAY_LEN);\n}\n\nbool IntersectBox(in Ray R, out float pNearT, out float pFarT) {\n vec3 invR = vec3(1.0, 1.0, 1.0) / R.m_D;\n vec3 bottomT = invR * (vec3(gClippedAaBbMin.x, gClippedAaBbMin.y, gClippedAaBbMin.z) - R.m_O);\n vec3 topT = invR * (vec3(gClippedAaBbMax.x, gClippedAaBbMax.y, gClippedAaBbMax.z) - R.m_O);\n vec3 minT = min(topT, bottomT);\n vec3 maxT = max(topT, bottomT);\n float largestMinT = max(max(minT.x, minT.y), max(minT.x, minT.z));\n float smallestMaxT = min(min(maxT.x, maxT.y), min(maxT.x, maxT.z));\n\n pNearT = largestMinT;\n pFarT = smallestMaxT;\n\n return smallestMaxT > largestMinT;\n}\n\n// assume volume is centered at 0,0,0 so p spans -bounds to + bounds\n// transform p to range from 0,0,0 to 1,1,1 for volume texture sampling.\n// optionally invert axes\nvec3 PtoVolumeTex(vec3 p) {\n vec3 uvw = (p - gVolCenter) * gInvAaBbMax + vec3(0.5, 0.5, 0.5);\n // if flipVolume = 1, uvw is unchanged.\n // if flipVolume = -1, uvw = 1 - uvw\n uvw = (flipVolume * (uvw - 0.5) + 0.5);\n return uvw;\n}\n\nconst float UINT8_MAX = 1.0;//255.0;\n\n// strategy: sample up to 4 channels, and take the post-LUT maximum intensity as the channel that wins\n// we will return the unmapped raw intensity value from the volume so that other luts can be applied again later.\nfloat GetNormalizedIntensityMax4ch(in vec3 P, out int ch) {\n vec4 intensity = UINT8_MAX * texture(volumeTexture, 
PtoVolumeTex(P));\n\n //intensity = (intensity - gIntensityMin) / (gIntensityMax - gIntensityMin);\n vec4 ilut = vec4(0.0, 0.0, 0.0, 0.0);\n // w in the lut texture is \"opacity\"\n ilut.x = texture(gLutTexture, vec2(intensity.x, 0.5 / 4.0)).w / 255.0;\n ilut.y = texture(gLutTexture, vec2(intensity.y, 1.5 / 4.0)).w / 255.0;\n ilut.z = texture(gLutTexture, vec2(intensity.z, 2.5 / 4.0)).w / 255.0;\n ilut.w = texture(gLutTexture, vec2(intensity.w, 3.5 / 4.0)).w / 255.0;\n\n float maxIn = 0.0;\n float iOut = 0.0;\n ch = 0;\n for (int i = 0; i < min(gNChannels, 4); ++i) {\n if (ilut[i] > maxIn) {\n maxIn = ilut[i];\n ch = i;\n iOut = intensity[i];\n }\n }\n\n //return maxIn;\n return iOut;\n}\n\nfloat GetNormalizedIntensity4ch(vec3 P, int ch) {\n vec4 intensity = UINT8_MAX * texture(volumeTexture, PtoVolumeTex(P));\n // select channel\n float intensityf = intensity[ch];\n //intensityf = (intensityf - gIntensityMin[ch]) / (gIntensityMax[ch] - gIntensityMin[ch]);\n //intensityf = texture(gLutTexture, vec2(intensityf, (0.5+float(ch))/4.0)).x;\n\n return intensityf;\n}\n\n// note that gInvGradientDelta is maxpixeldim of volume\n// gGradientDeltaX,Y,Z is 1/X,Y,Z of volume\nvec3 Gradient4ch(vec3 P, int ch) {\n vec3 Gradient;\n\n Gradient.x = (GetNormalizedIntensity4ch(P + (gGradientDeltaX), ch) - GetNormalizedIntensity4ch(P - (gGradientDeltaX), ch)) * gInvGradientDelta;\n Gradient.y = (GetNormalizedIntensity4ch(P + (gGradientDeltaY), ch) - GetNormalizedIntensity4ch(P - (gGradientDeltaY), ch)) * gInvGradientDelta;\n Gradient.z = (GetNormalizedIntensity4ch(P + (gGradientDeltaZ), ch) - GetNormalizedIntensity4ch(P - (gGradientDeltaZ), ch)) * gInvGradientDelta;\n\n return Gradient;\n}\n\nfloat GetOpacity(float NormalizedIntensity, int ch) {\n // apply lut\n float o = texture(gLutTexture, vec2(NormalizedIntensity, (0.5 + float(ch)) / 4.0)).w / 255.0;\n float Intensity = o * gOpacity[ch];\n return Intensity;\n}\n\nvec3 GetEmissionN(float NormalizedIntensity, int ch) {\n return gEmissive[ch];\n}\n\nvec3 GetDiffuseN(float NormalizedIntensity, int ch) {\n vec4 col = texture(gLutTexture, vec2(NormalizedIntensity, (0.5 + float(ch)) / 4.0));\n //vec3 col = vec3(1.0, 1.0, 1.0);\n return col.xyz * gDiffuse[ch];\n}\n\nvec3 GetSpecularN(float NormalizedIntensity, int ch) {\n return gSpecular[ch];\n}\n\nfloat GetGlossinessN(float NormalizedIntensity, int ch) {\n return gGlossiness[ch];\n}\n\n// a bsdf sample, a sample on a light source, and a randomly chosen light index\nstruct LightingSample {\n float m_bsdfComponent;\n vec2 m_bsdfDir;\n vec2 m_lightPos;\n float m_lightComponent;\n float m_LightNum;\n};\n\nLightingSample LightingSample_LargeStep(inout uvec2 seed) {\n return LightingSample(\n rand(seed),\n vec2(rand(seed), rand(seed)),\n vec2(rand(seed), rand(seed)),\n rand(seed),\n rand(seed)\n );\n}\n\n// return a color xyz\nvec3 Light_Le(in Light light, in vec2 UV) {\n if (light.mT == 0)\n return RGBtoXYZ(light.mColor) / light.mArea;\n\n if (light.mT == 1) {\n if (UV.y > 0.0)\n return RGBtoXYZ(mix(light.mColorMiddle, light.mColorTop, abs(UV.y)));\n else\n return RGBtoXYZ(mix(light.mColorMiddle, light.mColorBottom, abs(UV.y)));\n }\n\n return BLACK;\n}\n\n// return a color xyz\nvec3 Light_SampleL(in Light light, in vec3 P, out Ray Rl, out float Pdf, in LightingSample LS) {\n vec3 L = BLACK;\n Pdf = 0.0;\n vec3 Ro = vec3(0, 0, 0), Rd = vec3(0, 0, 1);\n if (light.mT == 0) {\n Ro = (light.mP + ((-0.5 + LS.m_lightPos.x) * light.mWidth * light.mU) + ((-0.5 + LS.m_lightPos.y) * light.mHeight * light.mV));\n Rd = 
normalize(P - Ro);\n L = dot(Rd, light.mN) > 0.0 ? Light_Le(light, vec2(0.0)) : BLACK;\n Pdf = abs(dot(Rd, light.mN)) > 0.0 ? dot(P - Ro, P - Ro) / (abs(dot(Rd, light.mN)) * light.mArea) : 0.0;\n } else if (light.mT == 1) {\n Ro = light.mP + light.mSkyRadius * getUniformSphereSample(LS.m_lightPos);\n Rd = normalize(P - Ro);\n L = Light_Le(light, vec2(1.0) - 2.0 * LS.m_lightPos);\n Pdf = pow(light.mSkyRadius, 2.0) / light.mArea;\n }\n\n Rl = Ray(Ro, Rd, 0.0, length(P - Ro));\n\n return L;\n}\n\n// Intersect ray with light\nbool Light_Intersect(Light light, inout Ray R, out float T, out vec3 L, out float pPdf) {\n if (light.mT == 0) {\n // Compute projection\n float DotN = dot(R.m_D, light.mN);\n\n // Ray is coplanar with light surface\n if (DotN >= 0.0)\n return false;\n\n // Compute hit distance\n T = (-light.mDistance - dot(R.m_O, light.mN)) / DotN;\n\n // Intersection is in ray's negative direction\n if (T < R.m_MinT || T > R.m_MaxT)\n return false;\n\n // Determine position on light\n vec3 Pl = rayAt(R, T);\n\n // Vector from point on area light to center of area light\n vec3 Wl = Pl - light.mP;\n\n // Compute texture coordinates\n vec2 UV = vec2(dot(Wl, light.mU), dot(Wl, light.mV));\n\n // Check if within bounds of light surface\n if (UV.x > light.mHalfWidth || UV.x < -light.mHalfWidth || UV.y > light.mHalfHeight || UV.y < -light.mHalfHeight)\n return false;\n\n R.m_MaxT = T;\n\n //pUV = UV;\n\n if (DotN < 0.0)\n L = RGBtoXYZ(light.mColor) / light.mArea;\n else\n L = BLACK;\n\n pPdf = dot(R.m_O - Pl, R.m_O - Pl) / (DotN * light.mArea);\n\n return true;\n } else if (light.mT == 1) {\n T = light.mSkyRadius;\n\n // Intersection is in ray's negative direction\n if (T < R.m_MinT || T > R.m_MaxT)\n return false;\n\n R.m_MaxT = T;\n\n vec2 UV = vec2(SphericalPhi(R.m_D) * INV_2_PI, SphericalTheta(R.m_D) * INV_PI);\n\n L = Light_Le(light, vec2(1.0, 1.0) - 2.0 * UV);\n\n pPdf = pow(light.mSkyRadius, 2.0) / light.mArea;\n //pUV = UV;\n\n return true;\n }\n\n return false;\n}\n\nfloat Light_Pdf(in Light light, in vec3 P, in vec3 Wi) {\n vec3 L;\n vec2 UV;\n float Pdf = 1.0;\n\n Ray Rl = Ray(P, Wi, 0.0, 100000.0);\n\n if (light.mT == 0) {\n float T = 0.0;\n\n if (!Light_Intersect(light, Rl, T, L, Pdf))\n return 0.0;\n\n return pow(T, 2.0) / (abs(dot(light.mN, -Wi)) * light.mArea);\n } else if (light.mT == 1) {\n return pow(light.mSkyRadius, 2.0) / light.mArea;\n }\n\n return 0.0;\n}\n\nstruct VolumeShader {\n int m_Type; // 0 = bsdf, 1 = phase\n\n vec3 m_Kd; // isotropic phase // xyz color\n vec3 m_R; // specular reflectance\n float m_Ior;\n float m_Exponent;\n vec3 m_Nn;\n vec3 m_Nu;\n vec3 m_Nv;\n};\n\n// return a xyz color\nvec3 ShaderPhase_F(in VolumeShader shader, in vec3 Wo, in vec3 Wi) {\n return shader.m_Kd * INV_PI;\n}\n\nfloat ShaderPhase_Pdf(in VolumeShader shader, in vec3 Wo, in vec3 Wi) {\n return INV_4_PI;\n}\n\nvec3 ShaderPhase_SampleF(in VolumeShader shader, in vec3 Wo, out vec3 Wi, out float Pdf, in vec2 U) {\n Wi = getUniformSphereSample(U);\n Pdf = ShaderPhase_Pdf(shader, Wo, Wi);\n\n return ShaderPhase_F(shader, Wo, Wi);\n}\n\n// return a xyz color\nvec3 Lambertian_F(in VolumeShader shader, in vec3 Wo, in vec3 Wi) {\n return shader.m_Kd * INV_PI;\n}\n\nfloat Lambertian_Pdf(in VolumeShader shader, in vec3 Wo, in vec3 Wi) {\n //return abs(Wi.z)*INV_PI;\n return SameHemisphere(Wo, Wi) ? 
abs(Wi.z) * INV_PI : 0.0;\n}\n\n// return a xyz color\nvec3 Lambertian_SampleF(in VolumeShader shader, in vec3 Wo, out vec3 Wi, out float Pdf, in vec2 U) {\n Wi = getCosineWeightedHemisphereSample(U);\n\n if (Wo.z < 0.0)\n Wi.z *= -1.0;\n\n Pdf = Lambertian_Pdf(shader, Wo, Wi);\n\n return Lambertian_F(shader, Wo, Wi);\n}\n\nvec3 SphericalDirection(in float SinTheta, in float CosTheta, in float Phi) {\n return vec3(SinTheta * cos(Phi), SinTheta * sin(Phi), CosTheta);\n}\n\nvoid Blinn_SampleF(in VolumeShader shader, in vec3 Wo, out vec3 Wi, out float Pdf, in vec2 U) {\n // Compute sampled half-angle vector wh for Blinn distribution\n float costheta = pow(U.x, 1. / (shader.m_Exponent + 1.0));\n float sintheta = sqrt(max(0., 1. - costheta * costheta));\n float phi = U.y * 2. * PI;\n\n vec3 wh = SphericalDirection(sintheta, costheta, phi);\n\n if (!SameHemisphere(Wo, wh))\n wh = -wh;\n\n // Compute incident direction by reflecting about wh\n Wi = -Wo + 2. * dot(Wo, wh) * wh;\n\n // Compute PDF for wi from Blinn distribution\n float blinn_pdf = ((shader.m_Exponent + 1.) * pow(costheta, shader.m_Exponent)) / (2. * PI * 4. * dot(Wo, wh));\n\n if (dot(Wo, wh) <= 0.)\n blinn_pdf = 0.;\n\n Pdf = blinn_pdf;\n}\n\nfloat Blinn_D(in VolumeShader shader, in vec3 wh) {\n float costhetah = abs(wh.z);//AbsCosTheta(wh);\n return (shader.m_Exponent + 2.0) * INV_2_PI * pow(costhetah, shader.m_Exponent);\n}\nfloat Microfacet_G(in VolumeShader shader, in vec3 wo, in vec3 wi, in vec3 wh) {\n float NdotWh = abs(wh.z);//AbsCosTheta(wh);\n float NdotWo = abs(wo.z);//AbsCosTheta(wo);\n float NdotWi = abs(wi.z);//AbsCosTheta(wi);\n float WOdotWh = abs(dot(wo, wh));\n\n return min(1., min((2. * NdotWh * NdotWo / WOdotWh), (2. * NdotWh * NdotWi / WOdotWh)));\n}\n\nvec3 Microfacet_F(in VolumeShader shader, in vec3 wo, in vec3 wi) {\n float cosThetaO = abs(wo.z);//AbsCosTheta(wo);\n float cosThetaI = abs(wi.z);//AbsCosTheta(wi);\n\n if (cosThetaI == 0. || cosThetaO == 0.)\n return BLACK;\n\n vec3 wh = wi + wo;\n\n if (wh.x == 0. && wh.y == 0. && wh.z == 0.)\n return BLACK;\n\n wh = normalize(wh);\n float cosThetaH = dot(wi, wh);\n\n vec3 F = WHITE;//m_Fresnel.Evaluate(cosThetaH);\n\n return shader.m_R * Blinn_D(shader, wh) * Microfacet_G(shader, wo, wi, wh) * F / (4. * cosThetaI * cosThetaO);\n}\n\nvec3 ShaderBsdf_WorldToLocal(in VolumeShader shader, in vec3 W) {\n return vec3(dot(W, shader.m_Nu), dot(W, shader.m_Nv), dot(W, shader.m_Nn));\n}\n\nvec3 ShaderBsdf_LocalToWorld(in VolumeShader shader, in vec3 W) {\n return vec3(\n shader.m_Nu.x * W.x + shader.m_Nv.x * W.y + shader.m_Nn.x * W.z,\n shader.m_Nu.y * W.x + shader.m_Nv.y * W.y + shader.m_Nn.y * W.z,\n shader.m_Nu.z * W.x + shader.m_Nv.z * W.y + shader.m_Nn.z * W.z);\n}\n\nfloat Blinn_Pdf(in VolumeShader shader, in vec3 Wo, in vec3 Wi) {\n vec3 wh = normalize(Wo + Wi);\n\n float costheta = abs(wh.z);//AbsCosTheta(wh);\n // Compute PDF for wi from Blinn distribution\n float blinn_pdf = ((shader.m_Exponent + 1.) * pow(costheta, shader.m_Exponent)) / (2. * PI * 4. 
* dot(Wo, wh));\n\n if (dot(Wo, wh) <= 0.0)\n blinn_pdf = 0.0;\n\n return blinn_pdf;\n}\n\nvec3 Microfacet_SampleF(in VolumeShader shader, in vec3 wo, out vec3 wi, out float Pdf, in vec2 U) {\n Blinn_SampleF(shader, wo, wi, Pdf, U);\n\n if (!SameHemisphere(wo, wi))\n return BLACK;\n\n return Microfacet_F(shader, wo, wi);\n}\n\nfloat Microfacet_Pdf(in VolumeShader shader, in vec3 wo, in vec3 wi) {\n if (!SameHemisphere(wo, wi))\n return 0.0;\n\n return Blinn_Pdf(shader, wo, wi);\n}\n\n// return a xyz color\nvec3 ShaderBsdf_F(in VolumeShader shader, in vec3 Wo, in vec3 Wi) {\n vec3 Wol = ShaderBsdf_WorldToLocal(shader, Wo);\n vec3 Wil = ShaderBsdf_WorldToLocal(shader, Wi);\n\n vec3 R = vec3(0, 0, 0);\n\n R += Lambertian_F(shader, Wol, Wil);\n R += Microfacet_F(shader, Wol, Wil);\n\n return R;\n}\n\nfloat ShaderBsdf_Pdf(in VolumeShader shader, in vec3 Wo, in vec3 Wi) {\n vec3 Wol = ShaderBsdf_WorldToLocal(shader, Wo);\n vec3 Wil = ShaderBsdf_WorldToLocal(shader, Wi);\n\n float Pdf = 0.0;\n\n Pdf += Lambertian_Pdf(shader, Wol, Wil);\n Pdf += Microfacet_Pdf(shader, Wol, Wil);\n\n return Pdf;\n}\n\nvec3 ShaderBsdf_SampleF(in VolumeShader shader, in LightingSample S, in vec3 Wo, out vec3 Wi, out float Pdf, in vec2 U) {\n vec3 Wol = ShaderBsdf_WorldToLocal(shader, Wo);\n vec3 Wil = vec3(0, 0, 0);\n\n vec3 R = vec3(0, 0, 0);\n\n if (S.m_bsdfComponent <= 0.5) {\n Lambertian_SampleF(shader, Wol, Wil, Pdf, S.m_bsdfDir);\n } else {\n Microfacet_SampleF(shader, Wol, Wil, Pdf, S.m_bsdfDir);\n }\n\n Pdf += Lambertian_Pdf(shader, Wol, Wil);\n Pdf += Microfacet_Pdf(shader, Wol, Wil);\n\n R += Lambertian_F(shader, Wol, Wil);\n R += Microfacet_F(shader, Wol, Wil);\n\n Wi = ShaderBsdf_LocalToWorld(shader, Wil);\n\n //return vec3(1,1,1);\n return R;\n}\n\n// return a xyz color\nvec3 Shader_F(in VolumeShader shader, in vec3 Wo, in vec3 Wi) {\n if (shader.m_Type == 0) {\n return ShaderBsdf_F(shader, Wo, Wi);\n } else {\n return ShaderPhase_F(shader, Wo, Wi);\n }\n}\n\nfloat Shader_Pdf(in VolumeShader shader, in vec3 Wo, in vec3 Wi) {\n if (shader.m_Type == 0) {\n return ShaderBsdf_Pdf(shader, Wo, Wi);\n } else {\n return ShaderPhase_Pdf(shader, Wo, Wi);\n }\n}\n\nvec3 Shader_SampleF(in VolumeShader shader, in LightingSample S, in vec3 Wo, out vec3 Wi, out float Pdf, in vec2 U) {\n //return vec3(1,0,0);\n if (shader.m_Type == 0) {\n return ShaderBsdf_SampleF(shader, S, Wo, Wi, Pdf, U);\n } else {\n return ShaderPhase_SampleF(shader, Wo, Wi, Pdf, U);\n }\n}\n\nbool IsBlack(in vec3 v) {\n return (v.x == 0.0 && v.y == 0.0 && v.z == 0.0);\n}\n\nfloat PowerHeuristic(float nf, float fPdf, float ng, float gPdf) {\n float f = nf * fPdf;\n float g = ng * gPdf;\n // The power heuristic is Veach's MIS balance heuristic except each component is being squared\n // balance heuristic would be f/(f+g) ...?\n return (f * f) / (f * f + g * g);\n}\n\nfloat MISContribution(float pdf1, float pdf2) {\n return PowerHeuristic(1.0, pdf1, 1.0, pdf2);\n}\n\n// \"shadow ray\" using gStepSizeShadow, test whether it can exit the volume or not\nbool DoesSecondaryRayScatterInVolume(inout Ray R, inout uvec2 seed) {\n float MinT;\n float MaxT;\n vec3 Ps;\n\n if (!IntersectBox(R, MinT, MaxT))\n return false;\n\n MinT = max(MinT, R.m_MinT);\n MaxT = min(MaxT, R.m_MaxT);\n\n // delta (Woodcock) tracking\n float S = -log(rand(seed)) / gDensityScale;\n float Sum = 0.0;\n float SigmaT = 0.0;\n\n MinT += rand(seed) * gStepSizeShadow;\n int ch = 0;\n float intensity = 0.0;\n while (Sum < S) {\n Ps = rayAt(R, MinT); // R.m_O + MinT * R.m_D;\n\n if (MinT > 
MaxT)\n return false;\n\n intensity = GetNormalizedIntensityMax4ch(Ps, ch);\n SigmaT = gDensityScale * GetOpacity(intensity, ch);\n\n Sum += SigmaT * gStepSizeShadow;\n MinT += gStepSizeShadow;\n }\n\n return true;\n}\n\nint GetNearestLight(Ray R, out vec3 oLightColor, out vec3 Pl, out float oPdf) {\n int hit = -1;\n float T = 0.0;\n Ray rayCopy = R;\n float pdf = 0.0;\n\n for (int i = 0; i < 2; i++) {\n if (Light_Intersect(gLights[i], rayCopy, T, oLightColor, pdf)) {\n Pl = rayAt(R, T);\n hit = i;\n }\n }\n oPdf = pdf;\n\n return hit;\n}\n\n// return a XYZ color\n// Wo is direction from scatter point out toward incident ray direction\n\n// Wi goes toward light sample and is not necessarily perfect reflection of Wo\n// ^Wi ^N ^Wo\n// \\\\ | //\n// \\\\ | //\n// \\\\ | //\n// \\\\ | //\n// \\\\|// Pe = volume sample where scattering occurs\n// ---------\nvec3 EstimateDirectLight(int shaderType, float Density, int ch, in Light light, in LightingSample LS, in vec3 Wo, in vec3 Pe, in vec3 N, inout uvec2 seed) {\n vec3 Ld = BLACK, Li = BLACK, F = BLACK;\n\n vec3 diffuse = GetDiffuseN(Density, ch);\n vec3 specular = GetSpecularN(Density, ch);\n float glossiness = GetGlossinessN(Density, ch);\n\n // can N and Wo be coincident????\n vec3 nu = normalize(cross(N, Wo));\n vec3 nv = normalize(cross(N, nu));\n\n // the IoR here is hard coded... and unused!!!!\n VolumeShader Shader = VolumeShader(shaderType, RGBtoXYZ(diffuse), RGBtoXYZ(specular), 2.5, glossiness, N, nu, nv);\n\n float LightPdf = 1.0, ShaderPdf = 1.0;\n\n Ray Rl = Ray(vec3(0, 0, 0), vec3(0, 0, 1.0), 0.0, MAX_RAY_LEN);\n // Rl is ray from light toward Pe in volume, with a max traversal of the distance from Pe to Light sample pos.\n Li = Light_SampleL(light, Pe, Rl, LightPdf, LS);\n\n // Wi: negate ray direction: from volume scatter point toward light...?\n vec3 Wi = -Rl.m_D, P = vec3(0, 0, 0);\n\n // we will calculate two lighting contributions and combine them by MIS.\n\n F = Shader_F(Shader, Wo, Wi);\n\n ShaderPdf = Shader_Pdf(Shader, Wo, Wi);\n\n // get a lighting contribution along Rl; see if Rl would scatter in the volume or not\n if (!IsBlack(Li) && (ShaderPdf > 0.0) && (LightPdf > 0.0) && !DoesSecondaryRayScatterInVolume(Rl, seed)) {\n // ray from light can see through volume to Pe!\n\n float dotProd = 1.0;\n if (shaderType == ShaderType_Brdf) {\n\n // (use abs or clamp here?)\n dotProd = abs(dot(Wi, N));\n }\n Ld += F * Li * dotProd * MISContribution(LightPdf, ShaderPdf) / LightPdf;\n\n }\n\n // get a lighting contribution by sampling nearest light from the scattering point\n F = Shader_SampleF(Shader, LS, Wo, Wi, ShaderPdf, LS.m_bsdfDir);\n if (!IsBlack(F) && (ShaderPdf > 0.0)) {\n vec3 Pl = vec3(0, 0, 0);\n int n = GetNearestLight(Ray(Pe, Wi, 0.0, 1000000.0), Li, Pl, LightPdf);\n if (n > -1) {\n Light pLight = gLights[n];\n LightPdf = Light_Pdf(pLight, Pe, Wi);\n\n if ((LightPdf > 0.0) && !IsBlack(Li)) {\n Ray rr = Ray(Pl, normalize(Pe - Pl), 0.0, length(Pe - Pl));\n if (!DoesSecondaryRayScatterInVolume(rr, seed)) {\n float dotProd = 1.0;\n if (shaderType == ShaderType_Brdf) {\n\n // (use abs or clamp here?)\n dotProd = abs(dot(Wi, N));\n }\n // note order of MIS params is swapped\n Ld += F * Li * dotProd * MISContribution(ShaderPdf, LightPdf) / ShaderPdf;\n }\n\n }\n }\n }\n\n return Ld;\n\n}\n\n// return a linear xyz color\nvec3 UniformSampleOneLight(int shaderType, float Density, int ch, in vec3 Wo, in vec3 Pe, in vec3 N, inout uvec2 seed) {\n //if (NUM_LIGHTS == 0)\n // return BLACK;\n\n // select a random light, a random 2d 
sample on light, and a random 2d sample on brdf\n LightingSample LS = LightingSample_LargeStep(seed);\n\n int WhichLight = int(floor(LS.m_LightNum * float(NUM_LIGHTS)));\n\n Light light = gLights[WhichLight];\n\n return float(NUM_LIGHTS) * EstimateDirectLight(shaderType, Density, ch, light, LS, Wo, Pe, N, seed);\n\n}\n\nbool SampleScatteringEvent(inout Ray R, inout uvec2 seed, out vec3 Ps) {\n float MinT;\n float MaxT;\n\n if (!IntersectBox(R, MinT, MaxT))\n return false;\n\n MinT = max(MinT, R.m_MinT);\n MaxT = min(MaxT, R.m_MaxT);\n\n // delta (Woodcock) tracking\n\n // notes, not necessarily coherent:\n // ray march along the ray's projected path and keep an average sigmaT value.\n // The distance is weighted by the intensity at each ray step sample. High intensity increases the apparent distance.\n // When the distance has become greater than the average sigmaT value given by -log(RandomFloat[0, 1]) / averageSigmaT\n // then that would be considered the interaction position.\n\n // sigmaT = sigmaA + sigmaS = absorption coeff + scattering coeff = extinction coeff\n\n // Beer-Lambert law: transmittance T(t) = exp(-sigmaT*t) where t is a distance!\n\n // importance sampling the exponential function to produce a free path distance S\n // the PDF is p(t) = sigmaT * exp(-sigmaT * t)\n // In a homogeneous volume,\n // S is the free-path distance = -ln(1-zeta)/sigmaT where zeta is a random variable\n // density scale = 0 => S --> 0..inf. Low density means randomly sized ray paths\n // density scale = inf => S --> 0. High density means short ray paths!\n\n // note that ln(x:0..1) is negative\n\n // here gDensityScale represents sigmaMax, a majorant of sigmaT\n // it is a parameter that should be set as close to the max extinction coefficient as possible.\n float S = -log(rand(seed)) / gDensityScale;\n\n float Sum = 0.0;\n float SigmaT = 0.0; // accumulated extinction along ray march\n\n // start: take one step now.\n MinT += rand(seed) * gStepSize;\n\n int ch = 0;\n float intensity = 0.0;\n\n // ray march until we have traveled S (or hit the maxT of the ray)\n while (Sum < S) {\n Ps = rayAt(R, MinT); // R.m_O + MinT * R.m_D;\n\n // if we exit the volume with no scattering\n if (MinT > MaxT)\n return false;\n\n intensity = GetNormalizedIntensityMax4ch(Ps, ch);\n SigmaT = gDensityScale * GetOpacity(intensity, ch);\n\n Sum += SigmaT * gStepSize;\n MinT += gStepSize;\n }\n\n // at this time, MinT - original MinT is the T transmission distance before a scatter event.\n // Ps is the point\n\n return true;\n}\n\nvec4 CalculateRadiance(inout uvec2 seed) {\n float r = rand(seed);\n //return vec4(r,0,0,1);\n\n vec3 Lv = BLACK, Li = BLACK;\n\n //Ray Re = Ray(vec3(0,0,0), vec3(0,0,1), 0.0, MAX_RAY_LEN);\n\n vec2 UV = vUv * uResolution + vec2(rand(seed), rand(seed));\n\n Ray Re = GenerateCameraRay(gCamera, UV, vec2(rand(seed), rand(seed)));\n\n //return vec4(vUv, 0.0, 1.0);\n //return vec4(0.5*(Re.m_D + 1.0), 1.0);\n //return vec4(Re.m_D, 1.0);\n\n //Re.m_MinT = 0.0f;\n //Re.m_MaxT = MAX_RAY_LEN;\n\n vec3 Pe = vec3(0, 0, 0), Pl = vec3(0, 0, 0);\n float lpdf = 0.0;\n\n float alpha = 0.0;\n // find point Pe along ray Re\n if (SampleScatteringEvent(Re, seed, Pe)) {\n alpha = 1.0;\n // is there a light between Re.m_O and Pe? 
(ray's maxT is distance to Pe)\n // (test to see if area light was hit before volume.)\n int i = GetNearestLight(Ray(Re.m_O, Re.m_D, 0.0, length(Pe - Re.m_O)), Li, Pl, lpdf);\n if (i > -1) {\n // set sample pixel value in frame estimate (prior to accumulation)\n return vec4(Li, 1.0);\n }\n\n int ch = 0;\n float D = GetNormalizedIntensityMax4ch(Pe, ch);\n\n // emission from volume\n Lv += RGBtoXYZ(GetEmissionN(D, ch));\n\n vec3 gradient = Gradient4ch(Pe, ch);\n // send ray out from Pe toward light\n switch (gShadingType) {\n case ShaderType_Brdf: {\n Lv += UniformSampleOneLight(ShaderType_Brdf, D, ch, normalize(-Re.m_D), Pe, normalize(gradient), seed);\n break;\n }\n\n case ShaderType_Phase: {\n Lv += 0.5 * UniformSampleOneLight(ShaderType_Phase, D, ch, normalize(-Re.m_D), Pe, normalize(gradient), seed);\n break;\n }\n\n case ShaderType_Mixed: {\n //const float GradMag = GradientMagnitude(Pe, volumedata.gradientVolumeTexture[ch]) * (1.0/volumedata.intensityMax[ch]);\n float GradMag = length(gradient);\n float PdfBrdf = (1.0 - exp(-gGradientFactor * GradMag));\n\n vec3 cls; // xyz color\n if (rand(seed) < PdfBrdf) {\n cls = UniformSampleOneLight(ShaderType_Brdf, D, ch, normalize(-Re.m_D), Pe, normalize(gradient), seed);\n } else {\n cls = 0.5 * UniformSampleOneLight(ShaderType_Phase, D, ch, normalize(-Re.m_D), Pe, normalize(gradient), seed);\n }\n\n Lv += cls;\n\n break;\n }\n }\n } else {\n // background color:\n // set Lv to a selected color based on environment light source?\n // if (uShowLights > 0.0) {\n // int n = GetNearestLight(Ray(Re.m_O, Re.m_D, 0.0f, 1000000.0f), Li, Pl, lpdf);\n // if (n > -1)\n // Lv = Li;\n // }\n //Lv = vec3(r,0,0);\n }\n\n // set sample pixel value in frame estimate (prior to accumulation)\n\n return vec4(Lv, alpha);\n}\n\nvec4 CumulativeMovingAverage(vec4 A, vec4 Ax, float N) {\n return A + ((Ax - A) / max((N), 1.0));\n}\n\nvoid main() {\n // seed for rand(seed) function\n uvec2 seed = uvec2(uFrameCounter, uFrameCounter + 1.0) * uvec2(gl_FragCoord);\n\n // perform path tracing and get resulting pixel color\n vec4 pixelColor = CalculateRadiance(seed);\n\n vec4 previousColor = texture(tPreviousTexture, vUv);\n if (uSampleCounter < 1.0) {\n previousColor = vec4(0, 0, 0, 0);\n }\n\n pc_fragColor = CumulativeMovingAverage(previousColor, pixelColor, uSampleCounter);\n}\n";
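At the tail of this shader, main() folds each new path-traced sample into the running frame estimate with CumulativeMovingAverage, i.e. the incremental mean A' = A + (x - A)/max(N, 1). A scalar TypeScript sketch (names are illustrative): when n is the 1-based count including the new sample this reproduces the exact mean, and the max(n, 1) guard makes the first sample simply overwrite the cleared buffer.

```ts
// Incremental (cumulative moving) average, mirroring the shader's
// CumulativeMovingAverage: avg' = avg + (sample - avg) / max(n, 1).
function cumulativeMovingAverage(avg: number, sample: number, n: number): number {
  return avg + (sample - avg) / Math.max(n, 1);
}

// Progressive refinement: after k samples the buffer holds their mean, so the
// path tracer converges visibly frame over frame.
let estimate = 0;
const samples = [0.2, 0.8, 0.5, 0.9];
samples.forEach((s, i) => {
  estimate = cumulativeMovingAverage(estimate, s, i + 1); // i + 1 = count including s
});
// estimate === (0.2 + 0.8 + 0.5 + 0.9) / 4 === 0.6
```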
|
|
5
5
|
export const pathTracingFragmentShaderSrc = pathTraceFragmentShader;
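EstimateDirectLight in the shader above combines a light-sampled and a BSDF-sampled contribution, weighting each with MISContribution, a one-sample-per-strategy wrapper around Veach's power heuristic (beta = 2). A TypeScript transcription of the same arithmetic:

```ts
// Veach power heuristic with beta = 2, as in the shader's PowerHeuristic:
// w = (nf * fPdf)^2 / ((nf * fPdf)^2 + (ng * gPdf)^2).
function powerHeuristic(nf: number, fPdf: number, ng: number, gPdf: number): number {
  const f = nf * fPdf;
  const g = ng * gPdf;
  return (f * f) / (f * f + g * g);
}

// One sample per strategy (nf = ng = 1), as MISContribution uses it.
function misContribution(pdf1: number, pdf2: number): number {
  return powerHeuristic(1.0, pdf1, 1.0, pdf2);
}

// The light-sample estimate weights by w(lightPdf, shaderPdf) / lightPdf;
// the BSDF-sample estimate swaps the arguments and divides by shaderPdf.
const lightTerm = misContribution(0.8, 0.2) / 0.8; // ~1.176
const bsdfTerm = misContribution(0.2, 0.8) / 0.2;  // ~0.294
```

Note the argument-order swap between the two estimates, which the shader itself calls out ("note order of MIS params is swapped").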
|
|
6
6
|
|
|
7
7
|
// Must match values in shader code above.
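The shader string hard-codes ShaderType_Brdf = 0, ShaderType_Phase = 1, ShaderType_Mixed = 2 and NUM_LIGHTS = 2 (gLights[2]), which is what the context comment above refers to. The JS-side constants fall outside this hunk, so the following mirror is only a hypothetical sketch: the names are invented, and only the numeric values come from the shader source.

```ts
// Hypothetical JS-side mirror of the GLSL compile-time constants; whatever
// names the package actually uses, the values must stay in sync with the
// shader string above.
export const ShaderType = {
  Brdf: 0,  // surface-like BRDF shading
  Phase: 1, // isotropic phase function
  Mixed: 2, // blend chosen per sample from gradient magnitude
} as const;

export const NUM_LIGHTS = 2; // matches `uniform Light gLights[2]`

// e.g. material.uniforms.gShadingType.value = ShaderType.Mixed;
```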
|
|
@@ -2,7 +2,7 @@ import { Vector2, Vector3, Matrix4, Texture } from "three";
|
|
|
2
2
|
/* babel-plugin-inline-import './shaders/raymarch.vert' */
|
|
3
3
|
const rayMarchVertexShader = "// switch on high precision floats\n#ifdef GL_ES\nprecision highp float;\n#endif\n\nvarying vec3 pObj;\n\nvoid main() {\n pObj = position;\n gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);\n}\n";
|
|
4
4
|
/* babel-plugin-inline-import './shaders/volumePick.frag' */
|
|
5
|
-
const rayMarchFragmentShader = "\n#ifdef GL_ES\nprecision highp float;\nprecision highp usampler2D;\n#endif\n\n#define M_PI 3.14159265358979323846\n\nuniform vec2 iResolution;\nuniform vec2 textureRes;\n\n//uniform float maskAlpha;\nuniform uvec2 ATLAS_DIMS;\nuniform vec3 AABB_CLIP_MIN;\nuniform float CLIP_NEAR;\nuniform vec3 AABB_CLIP_MAX;\nuniform float CLIP_FAR;\n// one raw channel atlas that has segmentation data\nuniform usampler2D textureAtlas;\n//uniform sampler2D textureAtlasMask;\nuniform sampler2D textureDepth;\nuniform int usingPositionTexture;\nuniform int BREAK_STEPS;\nuniform float SLICES;\nuniform float isOrtho;\nuniform float orthoThickness;\nuniform float orthoScale;\nuniform int maxProject;\nuniform vec3 flipVolume;\nuniform vec3 volumeScale;\n\n// view space to axis-aligned volume box\nuniform mat4 inverseModelViewMatrix;\nuniform mat4 inverseProjMatrix;\n\nvarying vec3 pObj;\n\nfloat powf(float a, float b) {\n return pow(a,b);\n}\n\nfloat rand(vec2 co) {\n float threadId = gl_FragCoord.x/(gl_FragCoord.y + 1.0);\n float bigVal = threadId*1299721.0/911.0;\n vec2 smallVal = vec2(threadId*7927.0/577.0, threadId*104743.0/1039.0);\n return fract(sin(dot(co, smallVal)) * bigVal);\n}\n\n// get the uv offset into the atlas for the given z slice\n// ATLAS_DIMS is the number of z slices across the atlas texture\nvec2 offsetFrontBack(uint a) {\n uint ax = ATLAS_DIMS.x;\n vec2 tiles = vec2(1.
|
|
5
|
+
const rayMarchFragmentShader = "\n#ifdef GL_ES\nprecision highp float;\nprecision highp usampler2D;\n#endif\n\n#define M_PI 3.14159265358979323846\n\nuniform vec2 iResolution;\nuniform vec2 textureRes;\n\n//uniform float maskAlpha;\nuniform uvec2 ATLAS_DIMS;\nuniform vec3 AABB_CLIP_MIN;\nuniform float CLIP_NEAR;\nuniform vec3 AABB_CLIP_MAX;\nuniform float CLIP_FAR;\n// one raw channel atlas that has segmentation data\nuniform usampler2D textureAtlas;\n//uniform sampler2D textureAtlasMask;\nuniform sampler2D textureDepth;\nuniform int usingPositionTexture;\nuniform int BREAK_STEPS;\nuniform float SLICES;\nuniform float isOrtho;\nuniform float orthoThickness;\nuniform float orthoScale;\nuniform int maxProject;\nuniform vec3 flipVolume;\nuniform vec3 volumeScale;\n\n// view space to axis-aligned volume box\nuniform mat4 inverseModelViewMatrix;\nuniform mat4 inverseProjMatrix;\n\nvarying vec3 pObj;\n\nfloat powf(float a, float b) {\n return pow(a, b);\n}\n\nfloat rand(vec2 co) {\n float threadId = gl_FragCoord.x / (gl_FragCoord.y + 1.0);\n float bigVal = threadId * 1299721.0 / 911.0;\n vec2 smallVal = vec2(threadId * 7927.0 / 577.0, threadId * 104743.0 / 1039.0);\n return fract(sin(dot(co, smallVal)) * bigVal);\n}\n\n// get the uv offset into the atlas for the given z slice\n// ATLAS_DIMS is the number of z slices across the atlas texture\nvec2 offsetFrontBack(uint a) {\n uint ax = ATLAS_DIMS.x;\n vec2 tiles = vec2(1.0 / float(ATLAS_DIMS.x), 1.0 / float(ATLAS_DIMS.y));\n vec2 os = vec2(float(a % ax), float(a / ax)) * tiles;\n return clamp(os, vec2(0.0), vec2(1.0) - vec2(1.0) * tiles);\n}\n\nuint sampleAtlasNearest(usampler2D tex, vec4 pos) {\n uint bounds = uint(pos[0] >= 0.0 && pos[0] <= 1.0 &&\n pos[1] >= 0.0 && pos[1] <= 1.0 &&\n pos[2] >= 0.0 && pos[2] <= 1.0);\n float nSlices = float(SLICES);\n\n // ascii art of a texture atlas:\n // +------------------+\n // | 0 | 1 | 2 | 3 |\n // +------------------+\n // | 4 | 5 | 6 | 7 | \n // +------------------+\n // | 8 | 9 |10 |11 |\n // +------------------+\n // |12 |13 |14 |15 |\n // +------------------+\n // Each tile is one z-slice of the 3D texture, which has been flattened\n // into an atlased 2D texture.\n\n // pos.xy is 0-1 range. Apply the xy flip here and then divide by number of tiles in x and y to normalize\n // to a single tile. 
This results in a uv coordinate that's in the correct X and Y position but only for\n // the first tile (z slice) of the atlas texture, z=0.\n vec2 loc0 = ((pos.xy - 0.5) * flipVolume.xy + 0.5) / vec2(float(ATLAS_DIMS.x), float(ATLAS_DIMS.y));\n\n // Next, offset the UV coordinate so we are sampling in the correct Z slice.\n // Round z to the nearest (floor) slice\n float z = min(floor(pos.z * nSlices), nSlices - 1.0);\n // flip z coordinate if needed\n if (flipVolume.z == -1.0) {\n z = nSlices - z - 1.0;\n }\n\n // calculate the offset to the z slice in the atlas texture\n vec2 o = offsetFrontBack(uint(z)) + loc0;\n //uint voxelColor = texture2D(tex, o).x;\n uint voxelColor = texelFetch(tex, ivec2(o * textureRes), 0).x;\n\n // Apply mask\n // float voxelMask = texture2D(textureAtlasMask, o).x;\n // voxelMask = mix(voxelMask, 1.0, maskAlpha);\n // voxelColor.rgb *= voxelMask;\n\n return bounds * voxelColor;\n}\n\nbool intersectBox(\n in vec3 r_o,\n in vec3 r_d,\n in vec3 boxMin,\n in vec3 boxMax,\n out float tnear,\n out float tfar\n) {\n // compute intersection of ray with all six bbox planes\n vec3 invR = vec3(1.0, 1.0, 1.0) / r_d;\n vec3 tbot = invR * (boxMin - r_o);\n vec3 ttop = invR * (boxMax - r_o);\n\n // re-order intersections to find smallest and largest on each axis\n vec3 tmin = min(ttop, tbot);\n vec3 tmax = max(ttop, tbot);\n\n // find the largest tmin and the smallest tmax\n float largest_tmin = max(max(tmin.x, tmin.y), tmin.z);\n float smallest_tmax = min(min(tmax.x, tmax.y), tmax.z);\n\n tnear = largest_tmin;\n tfar = smallest_tmax;\n\n // use >= here?\n return (smallest_tmax > largest_tmin);\n}\n\nvec4 integrateVolume(\n vec4 eye_o,\n vec4 eye_d,\n float tnear,\n float tfar,\n float clipNear,\n float clipFar,\n usampler2D textureAtlas\n) {\n uint C = 0u;\n // march along ray from front to back, accumulating color\n\n // estimate step length\n const int maxSteps = 512;\n // modify the 3 components of eye_d by volume scale\n float scaledSteps = float(BREAK_STEPS) * length((eye_d.xyz / volumeScale));\n float csteps = clamp(float(scaledSteps), 1.0, float(maxSteps));\n float invstep = (tfar - tnear) / csteps;\n // Removed random ray dither to prevent artifacting\n float r = 0.0; // (SLICES==1.0) ? 0.0 : rand(eye_d.xy);\n // if ortho and clipped, make step size smaller so we still get same number of steps\n float tstep = invstep * orthoThickness;\n float tfarsurf = r * tstep;\n float overflow = mod((tfarsurf - tfar), tstep); // random dithering offset\n float t = tnear + overflow;\n t += r * tstep; // random dithering offset\n float tdist = 0.0;\n int numSteps = 0;\n vec4 pos, col;\n for (int i = 0; i < maxSteps; i++) {\n pos = eye_o + eye_d * t;\n // !!! assume box bounds are -0.5 .. 0.5. 
pos = (pos-min)/(max-min)\n // scaling is handled by model transform and already accounted for before we get here.\n // AABB clip is independent of this and is only used to determine tnear and tfar.\n pos.xyz = (pos.xyz - (-0.5)) / ((0.5) - (-0.5)); //0.5 * (pos + 1.0); // map position from [boxMin, boxMax] to [0, 1] coordinates\n\n uint col = sampleAtlasNearest(textureAtlas, pos);\n\n // FOR INTERSECTION / PICKING, the FIRST nonzero intensity terminates the raymarch\n\n if (maxProject != 0) {\n C = max(col, C);\n } else {\n if (col > 0u) {\n C = col;\n break;\n }\n }\n t += tstep;\n numSteps = i;\n\n if (t > tfar || t > tnear + clipFar) {\n break;\n }\n }\n\n return vec4(float(C));\n}\n\nvoid main() {\n gl_FragColor = vec4(0.0);\n vec2 vUv = gl_FragCoord.xy / iResolution.xy;\n\n vec3 eyeRay_o, eyeRay_d;\n\n if (isOrtho == 0.0) {\n // for perspective rays:\n // world space camera coordinates\n // transform to object space\n eyeRay_o = (inverseModelViewMatrix * vec4(0.0, 0.0, 0.0, 1.0)).xyz;\n eyeRay_d = normalize(pObj - eyeRay_o);\n } else {\n // for ortho rays:\n float zDist = 2.0;\n eyeRay_d = (inverseModelViewMatrix * vec4(0.0, 0.0, -zDist, 0.0)).xyz;\n vec4 ray_o = vec4(2.0 * vUv - 1.0, 1.0, 1.0);\n ray_o.xy *= orthoScale;\n ray_o.x *= iResolution.x / iResolution.y;\n eyeRay_o = (inverseModelViewMatrix * ray_o).xyz;\n }\n\n // -0.5..0.5 is full box. AABB_CLIP lets us clip to a box shaped ROI to look at\n // I am applying it here at the earliest point so that the ray march does\n // not waste steps. For general shaped ROI, this has to be handled more\n // generally (obviously)\n vec3 boxMin = AABB_CLIP_MIN;\n vec3 boxMax = AABB_CLIP_MAX;\n\n float tnear, tfar;\n bool hit = intersectBox(eyeRay_o, eyeRay_d, boxMin, boxMax, tnear, tfar);\n\n if (!hit) {\n // return background color if ray misses the cube\n // is this safe to do when there is other geometry / gObjects drawn?\n gl_FragColor = vec4(0.0); //C1;//vec4(0.0);\n return;\n }\n\n float clipNear = 0.0;//-(dot(eyeRay_o.xyz, eyeNorm) + dNear) / dot(eyeRay_d.xyz, eyeNorm);\n float clipFar = 10000.0;//-(dot(eyeRay_o.xyz,-eyeNorm) + dFar ) / dot(eyeRay_d.xyz,-eyeNorm);\n\n // Sample the depth/position texture\n // If this is a depth texture, the r component is a depth value. If this is a position texture,\n // the xyz components are a view space position and w is 1.0 iff there's a mesh at this fragment.\n vec4 meshPosSample = texture2D(textureDepth, vUv);\n // Note: we make a different check for whether a mesh is present with depth vs. position textures.\n // Here's the check for depth textures:\n bool hasDepthValue = usingPositionTexture == 0 && meshPosSample.r < 1.0;\n\n // If there's a depth-contributing mesh at this fragment, we may need to terminate the ray early\n if (hasDepthValue || (usingPositionTexture == 1 && meshPosSample.a > 0.0)) {\n if (hasDepthValue) {\n // We're working with a depth value, so we need to convert back to view space position\n // Get a projection space position from depth and uv, and unproject back to view space\n vec4 meshProj = vec4(vUv * 2.0 - 1.0, meshPosSample.r * 2.0 - 1.0, 1.0);\n vec4 meshView = inverseProjMatrix * meshProj;\n meshPosSample = vec4(meshView.xyz / meshView.w, 1.0);\n }\n // Transform the mesh position to object space\n vec4 meshObj = inverseModelViewMatrix * meshPosSample;\n\n // Derive a t value for the mesh intersection\n // NOTE: divides by 0 when `eyeRay_d.z` is 0. 
Could be mitigated by picking another component\n // to derive with when z is 0, but I found this was rare enough in practice to be acceptable.\n float tMesh = (meshObj.z - eyeRay_o.z) / eyeRay_d.z;\n if (tMesh < tfar) {\n clipFar = tMesh - tnear;\n }\n }\n\n // tnear and tfar are intersections of box\n vec4 C = integrateVolume(vec4(eyeRay_o, 1.0), vec4(eyeRay_d, 0.0), tnear, tfar, clipNear, clipFar, textureAtlas);\n\n gl_FragColor = C;\n return;\n}\n";
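offsetFrontBack in the pick shader above maps a z-slice index to the uv origin of its tile in the flattened atlas (ATLAS_DIMS.x tiles per row, laid out row-major). The same arithmetic in TypeScript, for reference:

```ts
// UV origin of z-slice `a` in a row-major texture atlas, mirroring the
// shader's offsetFrontBack(): column = a % atlasX, row = floor(a / atlasX).
function offsetFrontBack(a: number, atlasX: number, atlasY: number): [number, number] {
  const tileW = 1 / atlasX;
  const tileH = 1 / atlasY;
  const u = (a % atlasX) * tileW;
  const v = Math.floor(a / atlasX) * tileH;
  // clamp so the offset never points past the last tile, as the shader does
  return [Math.min(u, 1 - tileW), Math.min(v, 1 - tileH)];
}

// A 16-slice volume in a 4x4 atlas: slice 6 sits in row 1, column 2.
offsetFrontBack(6, 4, 4); // [0.5, 0.25]
```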
|
|
6
6
|
export const pickVertexShaderSrc = rayMarchVertexShader;
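Both ray-march fragment shaders reconstruct a view-space position from the depth texture by unprojecting (uv, depth) through inverseProjMatrix before deriving the early-termination distance tMesh. A sketch of that unprojection, assuming three.js-style column-major matrices; `multiplyMat4Vec4` is a local helper, not a library call:

```ts
// Column-major 4x4 * vec4, as GLSL's `inverseProjMatrix * meshProj` does.
function multiplyMat4Vec4(m: number[], v: [number, number, number, number]) {
  const out: [number, number, number, number] = [0, 0, 0, 0];
  for (let row = 0; row < 4; row++) {
    out[row] = m[row] * v[0] + m[row + 4] * v[1] + m[row + 8] * v[2] + m[row + 12] * v[3];
  }
  return out;
}

// Rebuild a view-space position from screen uv and a stored 0..1 depth,
// matching the shader's `vUv * 2.0 - 1.0` / `depth * 2.0 - 1.0` mapping.
function depthToViewSpace(uv: [number, number], depth01: number, invProj: number[]) {
  const ndc: [number, number, number, number] = [uv[0] * 2 - 1, uv[1] * 2 - 1, depth01 * 2 - 1, 1];
  const view = multiplyMat4Vec4(invProj, ndc);
  return [view[0] / view[3], view[1] / view[3], view[2] / view[3]]; // perspective divide
}
```

The shader then transforms the result to object space and takes tMesh = (meshObj.z - eyeRay_o.z) / eyeRay_d.z, shrinking clipFar whenever the mesh lies in front of tfar.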
|
|
7
7
|
export const pickFragmentShaderSrc = rayMarchFragmentShader;
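intersectBox, shared by both ray marchers, is the classic slab test: intersect the three pairs of axis-aligned planes and keep the largest entering t and the smallest exiting t. A self-contained TypeScript version of the same test:

```ts
type Vec3 = [number, number, number];

// Slab-method ray/AABB intersection, mirroring the shader's intersectBox.
// Returns [tnear, tfar], or null on a miss (tfar <= tnear).
function intersectBox(ro: Vec3, rd: Vec3, boxMin: Vec3, boxMax: Vec3): [number, number] | null {
  let tnear = -Infinity;
  let tfar = Infinity;
  for (let axis = 0; axis < 3; axis++) {
    const inv = 1 / rd[axis]; // +-Infinity for axis-parallel rays; min/max absorb it
    const t0 = (boxMin[axis] - ro[axis]) * inv;
    const t1 = (boxMax[axis] - ro[axis]) * inv;
    tnear = Math.max(tnear, Math.min(t0, t1)); // largest entering t
    tfar = Math.min(tfar, Math.max(t0, t1));   // smallest exiting t
  }
  return tfar > tnear ? [tnear, tfar] : null;
}

// Unit cube centered at the origin, as the ray marchers assume (-0.5..0.5):
intersectBox([0, 0, -2], [0, 0, 1], [-0.5, -0.5, -0.5], [0.5, 0.5, 0.5]); // [1.5, 2.5]
```

Like the shader (which leaves a "use >= here?" note unresolved), this treats a grazing hit with tfar == tnear as a miss.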
|
|
8
8
|
export const pickShaderUniforms = () => {
|
|
@@ -2,7 +2,7 @@ import { Vector2, Vector3, Matrix4, Texture } from "three";
|
|
|
2
2
|
/* babel-plugin-inline-import './shaders/raymarch.vert' */
|
|
3
3
|
const rayMarchVertexShader = "// switch on high precision floats\n#ifdef GL_ES\nprecision highp float;\n#endif\n\nvarying vec3 pObj;\n\nvoid main() {\n pObj = position;\n gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);\n}\n";
|
|
4
4
|
/* babel-plugin-inline-import './shaders/raymarch.frag' */
|
|
5
|
-
const rayMarchFragmentShader = "\n#ifdef GL_ES\nprecision highp float;\n#endif\n\n#define M_PI 3.14159265358979323846\n\nuniform vec2 iResolution;\nuniform vec2 textureRes;\nuniform float GAMMA_MIN;\nuniform float GAMMA_MAX;\nuniform float GAMMA_SCALE;\nuniform float BRIGHTNESS;\nuniform float DENSITY;\nuniform float maskAlpha;\nuniform vec2 ATLAS_DIMS;\nuniform vec3 AABB_CLIP_MIN;\nuniform float CLIP_NEAR;\nuniform vec3 AABB_CLIP_MAX;\nuniform float CLIP_FAR;\nuniform sampler2D textureAtlas;\nuniform sampler2D textureAtlasMask;\nuniform sampler2D textureDepth;\nuniform int usingPositionTexture;\nuniform int BREAK_STEPS;\nuniform float SLICES;\nuniform float isOrtho;\nuniform float orthoThickness;\nuniform float orthoScale;\nuniform int maxProject;\nuniform bool interpolationEnabled;\nuniform vec3 flipVolume;\nuniform vec3 volumeScale;\n\n// view space to axis-aligned volume box\nuniform mat4 inverseModelViewMatrix;\nuniform mat4 inverseProjMatrix;\n\nvarying vec3 pObj;\n\nfloat powf(float a, float b) {\n return pow(a,b);\n}\n\nfloat rand(vec2 co) {\n float threadId = gl_FragCoord.x/(gl_FragCoord.y + 1.0);\n float bigVal = threadId*1299721.0/911.0;\n vec2 smallVal = vec2(threadId*7927.0/577.0, threadId*104743.0/1039.0);\n return fract(sin(dot(co, smallVal)) * bigVal);\n}\n\nvec4 luma2Alpha(vec4 color, float vmin, float vmax, float C) {\n float x = dot(color.rgb, vec3(0.2125, 0.7154, 0.0721));\n // float x = max(color[2], max(color[0],color[1]));\n float xi = (x-vmin)/(vmax-vmin);\n xi = clamp(xi,0.0,1.0);\n float y = pow(xi,C);\n y = clamp(y,0.0,1.0);\n color[3] = y;\n return color;\n}\n\nvec2 offsetFrontBack(float t) {\n int a = int(t);\n int ax = int(ATLAS_DIMS.x);\n vec2 os = vec2(float(a - (a / ax) * ax), float(a / ax)) / ATLAS_DIMS;\n return clamp(os, vec2(0.0), vec2(1.0) - vec2(1.0) / ATLAS_DIMS);\n}\n\nvec4 sampleAtlasLinear(sampler2D tex, vec4 pos) {\n float bounds = float(pos[0] >= 0.0 && pos[0] <= 1.0 &&\n pos[1] >= 0.0 && pos[1] <= 1.0 &&\n pos[2] >= 0.0 && pos[2] <= 1.0 );\n float nSlices = float(SLICES);\n // get location within atlas tile\n // TODO: get loc1 which follows ray to next slice along ray direction\n // when flipvolume = 1: pos\n // when flipvolume = -1: 1-pos\n vec2 loc0 = ((pos.xy - 0.5) * flipVolume.xy + 0.5) / ATLAS_DIMS;\n\n // loc ranges from 0 to 1/ATLAS_DIMS\n // shrink loc0 to within one half edge texel - so as not to sample across edges of tiles.\n loc0 = vec2(0.5) / textureRes + loc0 * (vec2(1.0) - ATLAS_DIMS / textureRes);\n \n // interpolate between two slices\n float z = (pos.z)*(nSlices-1.0);\n float z0 = floor(z);\n float t = z-z0; //mod(z, 1.0);\n float z1 = min(z0+1.0, nSlices-1.0);\n\n // flipped:\n if (flipVolume.z == -1.0) {\n z0 = nSlices - z0 - 1.0;\n z1 = nSlices - z1 - 1.0;\n t = 1.0 - t;\n }\n\n // get slice offsets in texture atlas\n vec2 o0 = offsetFrontBack(z0) + loc0;\n vec2 o1 = offsetFrontBack(z1) + loc0;\n\n vec4 slice0Color = texture2D(tex, o0);\n vec4 slice1Color = texture2D(tex, o1);\n // NOTE we could premultiply the mask in the fuse function,\n // but that is slower to update the maskAlpha value than here in the shader.\n // it is a memory vs perf tradeoff. 
Do users really need to update the maskAlpha at realtime speed?\n float slice0Mask = texture2D(textureAtlasMask, o0).x;\n float slice1Mask = texture2D(textureAtlasMask, o1).x;\n // or use max for conservative 0 or 1 masking?\n float maskVal = mix(slice0Mask, slice1Mask, t);\n // take mask from 0..1 to alpha..1\n maskVal = mix(maskVal, 1.0, maskAlpha);\n vec4 retval = mix(slice0Color, slice1Color, t);\n // only mask the rgb, not the alpha(?)\n retval.rgb *= maskVal;\n return bounds*retval;\n}\n\nvec4 sampleAtlasNearest(sampler2D tex, vec4 pos) {\n float bounds = float(pos[0] >= 0.0 && pos[0] <= 1.0 &&\n pos[1] >= 0.0 && pos[1] <= 1.0 &&\n pos[2] >= 0.0 && pos[2] <= 1.0 );\n float nSlices = float(SLICES);\n\n vec2 loc0 = ((pos.xy - 0.5) * flipVolume.xy + 0.5) / ATLAS_DIMS;\n\n // No interpolation - sample just one slice at a pixel center.\n // Ideally this would be accomplished in part by switching this texture to linear\n // filtering, but three makes this difficult to do through a WebGLRenderTarget.\n loc0 = floor(loc0 * textureRes) / textureRes;\n loc0 += vec2(0.5) / textureRes;\n\n float z = min(floor(pos.z * nSlices), nSlices-1.0);\n \n if (flipVolume.z == -1.0) {\n z = nSlices - z - 1.0;\n }\n\n vec2 o = offsetFrontBack(z) + loc0;\n vec4 voxelColor = texture2D(tex, o);\n\n // Apply mask\n float voxelMask = texture2D(textureAtlasMask, o).x;\n voxelMask = mix(voxelMask, 1.0, maskAlpha);\n voxelColor.rgb *= voxelMask;\n\n return bounds*voxelColor;\n}\n\nbool intersectBox(in vec3 r_o, in vec3 r_d, in vec3 boxMin, in vec3 boxMax,\n out float tnear, out float tfar) {\n // compute intersection of ray with all six bbox planes\n vec3 invR = vec3(1.0,1.0,1.0) / r_d;\n vec3 tbot = invR * (boxMin - r_o);\n vec3 ttop = invR * (boxMax - r_o);\n\n // re-order intersections to find smallest and largest on each axis\n vec3 tmin = min(ttop, tbot);\n vec3 tmax = max(ttop, tbot);\n\n // find the largest tmin and the smallest tmax\n float largest_tmin = max(max(tmin.x, tmin.y), tmin.z);\n float smallest_tmax = min(min(tmax.x, tmax.y), tmax.z);\n\n tnear = largest_tmin;\n tfar = smallest_tmax;\n\n // use >= here?\n return(smallest_tmax > largest_tmin);\n}\n\nvec4 accumulate(vec4 col, float s, vec4 C) {\n float stepScale = (1.0 - powf((1.0-col.w),s));\n col.w = stepScale;\n col.xyz *= col.w;\n col = clamp(col,0.0,1.0);\n\n C = (1.0-C.w)*col + C;\n return C;\n}\n\nvec4 integrateVolume(vec4 eye_o,vec4 eye_d,\n float tnear, float tfar,\n float clipNear, float clipFar,\n sampler2D textureAtlas\n ) {\n vec4 C = vec4(0.0);\n // march along ray from front to back, accumulating color\n\n // estimate step length\n const int maxSteps = 512;\n // modify the 3 components of eye_d by volume scale\n float scaledSteps = float(BREAK_STEPS) * length((eye_d.xyz/volumeScale));\n float csteps = clamp(float(scaledSteps), 1.0, float(maxSteps));\n float invstep = (tfar-tnear)/csteps;\n // special-casing the single slice to remove the random ray dither.\n // this removes a Moire pattern visible in single slice images, which we want to view as 2D images as best we can.\n float r = (SLICES==1.0) ? 
0.0 : rand(eye_d.xy);\n // if ortho and clipped, make step size smaller so we still get same number of steps\n float tstep = invstep*orthoThickness;\n float tfarsurf = r*tstep;\n float overflow = mod((tfarsurf - tfar),tstep); // random dithering offset\n float t = tnear + overflow;\n t += r*tstep; // random dithering offset\n float tdist = 0.0;\n int numSteps = 0;\n vec4 pos, col;\n // We need to be able to scale the alpha contrib with number of ray steps,\n // in order to make the final color invariant to the step size(?)\n // use maxSteps (a constant) as the numerator... Not sure if this is sound.\n float s = 0.5 * float(maxSteps) / csteps;\n for (int i = 0; i < maxSteps; i++) {\n pos = eye_o + eye_d*t;\n // !!! assume box bounds are -0.5 .. 0.5. pos = (pos-min)/(max-min)\n // scaling is handled by model transform and already accounted for before we get here.\n // AABB clip is independent of this and is only used to determine tnear and tfar.\n pos.xyz = (pos.xyz-(-0.5))/((0.5)-(-0.5)); //0.5 * (pos + 1.0); // map position from [boxMin, boxMax] to [0, 1] coordinates\n\n vec4 col = interpolationEnabled ? sampleAtlasLinear(textureAtlas, pos) : sampleAtlasNearest(textureAtlas, pos);\n\n if (maxProject != 0) {\n col.xyz *= BRIGHTNESS;\n C = max(col, C);\n } else {\n col = luma2Alpha(col, GAMMA_MIN, GAMMA_MAX, GAMMA_SCALE);\n col.xyz *= BRIGHTNESS;\n // for practical use the density only matters for regular volume integration\n col.w *= DENSITY;\n C = accumulate(col, s, C);\n }\n t += tstep;\n numSteps = i;\n\n if (t > tfar || t > tnear+clipFar ) break;\n if (C.w > 1.0 ) break;\n }\n\n return C;\n}\n\nvoid main() {\n gl_FragColor = vec4(0.0);\n vec2 vUv = gl_FragCoord.xy/iResolution.xy;\n\n vec3 eyeRay_o, eyeRay_d;\n\n if (isOrtho == 0.0) {\n // for perspective rays:\n // world space camera coordinates\n // transform to object space\n eyeRay_o = (inverseModelViewMatrix * vec4(0.0, 0.0, 0.0, 1.0)).xyz;\n eyeRay_d = normalize(pObj - eyeRay_o);\n } else {\n // for ortho rays:\n float zDist = 2.0;\n eyeRay_d = (inverseModelViewMatrix*vec4(0.0, 0.0, -zDist, 0.0)).xyz;\n vec4 ray_o = vec4(2.0*vUv - 1.0, 1.0, 1.0);\n ray_o.xy *= orthoScale;\n ray_o.x *= iResolution.x/iResolution.y;\n eyeRay_o = (inverseModelViewMatrix*ray_o).xyz;\n }\n\n // -0.5..0.5 is full box. AABB_CLIP lets us clip to a box shaped ROI to look at\n // I am applying it here at the earliest point so that the ray march does\n // not waste steps. For general shaped ROI, this has to be handled more\n // generally (obviously)\n vec3 boxMin = AABB_CLIP_MIN;\n vec3 boxMax = AABB_CLIP_MAX;\n\n float tnear, tfar;\n bool hit = intersectBox(eyeRay_o, eyeRay_d, boxMin, boxMax, tnear, tfar);\n\n if (!hit) {\n // return background color if ray misses the cube\n // is this safe to do when there is other geometry / gObjects drawn?\n gl_FragColor = vec4(0.0); //C1;//vec4(0.0);\n return;\n }\n\n float clipNear = 0.0;//-(dot(eyeRay_o.xyz, eyeNorm) + dNear) / dot(eyeRay_d.xyz, eyeNorm);\n float clipFar = 10000.0;//-(dot(eyeRay_o.xyz,-eyeNorm) + dFar ) / dot(eyeRay_d.xyz,-eyeNorm);\n\n // Sample the depth/position texture\n // If this is a depth texture, the r component is a depth value. If this is a position texture,\n // the xyz components are a view space position and w is 1.0 iff there's a mesh at this fragment.\n vec4 meshPosSample = texture2D(textureDepth, vUv);\n // Note: we make a different check for whether a mesh is present with depth vs. 
position textures.\n // Here's the check for depth textures:\n bool hasDepthValue = usingPositionTexture == 0 && meshPosSample.r < 1.0;\n\n // If there's a depth-contributing mesh at this fragment, we may need to terminate the ray early\n if (hasDepthValue || (usingPositionTexture == 1 && meshPosSample.a > 0.0)) {\n if (hasDepthValue) {\n // We're working with a depth value, so we need to convert back to view space position\n // Get a projection space position from depth and uv, and unproject back to view space\n vec4 meshProj = vec4(vUv * 2.0 - 1.0, meshPosSample.r * 2.0 - 1.0, 1.0);\n vec4 meshView = inverseProjMatrix * meshProj;\n meshPosSample = vec4(meshView.xyz / meshView.w, 1.0);\n }\n // Transform the mesh position to object space\n vec4 meshObj = inverseModelViewMatrix * meshPosSample;\n\n // Derive a t value for the mesh intersection\n // NOTE: divides by 0 when `eyeRay_d.z` is 0. Could be mitigated by picking another component\n // to derive with when z is 0, but I found this was rare enough in practice to be acceptable.\n float tMesh = (meshObj.z - eyeRay_o.z) / eyeRay_d.z;\n if (tMesh < tfar) {\n clipFar = tMesh - tnear;\n }\n }\n\n vec4 C = integrateVolume(vec4(eyeRay_o,1.0), vec4(eyeRay_d,0.0),\n tnear, tfar, //intersections of box\n clipNear, clipFar,\n textureAtlas);\n\n C = clamp(C, 0.0, 1.0);\n gl_FragColor = C;\n return;\n}\n";
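accumulate() in this shader performs front-to-back "under" compositing, after rescaling each sample's alpha by 1 - (1 - a)^s so the result stays roughly invariant to step count (s = 0.5 * maxSteps / csteps in integrateVolume, which the code itself flags as "Not sure if this is sound"). A TypeScript sketch of the same operation:

```ts
type RGBA = [number, number, number, number];

const clamp01 = (x: number) => Math.min(Math.max(x, 0), 1);

// Front-to-back accumulation mirroring the shader's accumulate():
// rescale alpha for the step length, premultiply, then composite under.
function accumulate(col: RGBA, s: number, C: RGBA): RGBA {
  const a = 1 - Math.pow(1 - col[3], s); // opacity correction for step length
  const src: RGBA = [clamp01(col[0] * a), clamp01(col[1] * a), clamp01(col[2] * a), clamp01(a)];
  const w = 1 - C[3]; // remaining transmittance in front of this sample
  return [w * src[0] + C[0], w * src[1] + C[1], w * src[2] + C[2], w * src[3] + C[3]];
}
```

With s = 1 this reduces to ordinary premultiplied under-compositing; the exponent compensates when csteps differs from the reference step count.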
+ const rayMarchFragmentShader = "\n#ifdef GL_ES\nprecision highp float;\n#endif\n\n#define M_PI 3.14159265358979323846\n\nuniform vec2 iResolution;\nuniform vec2 textureRes;\nuniform float GAMMA_MIN;\nuniform float GAMMA_MAX;\nuniform float GAMMA_SCALE;\nuniform float BRIGHTNESS;\nuniform float DENSITY;\nuniform float maskAlpha;\nuniform vec2 ATLAS_DIMS;\nuniform vec3 AABB_CLIP_MIN;\nuniform float CLIP_NEAR;\nuniform vec3 AABB_CLIP_MAX;\nuniform float CLIP_FAR;\nuniform sampler2D textureAtlas;\nuniform sampler2D textureAtlasMask;\nuniform sampler2D textureDepth;\nuniform int usingPositionTexture;\nuniform int BREAK_STEPS;\nuniform float SLICES;\nuniform float isOrtho;\nuniform float orthoThickness;\nuniform float orthoScale;\nuniform int maxProject;\nuniform bool interpolationEnabled;\nuniform vec3 flipVolume;\nuniform vec3 volumeScale;\n\n// view space to axis-aligned volume box\nuniform mat4 inverseModelViewMatrix;\nuniform mat4 inverseProjMatrix;\n\nvarying vec3 pObj;\n\nfloat powf(float a, float b) {\n  return pow(a, b);\n}\n\nfloat rand(vec2 co) {\n  float threadId = gl_FragCoord.x / (gl_FragCoord.y + 1.0);\n  float bigVal = threadId * 1299721.0 / 911.0;\n  vec2 smallVal = vec2(threadId * 7927.0 / 577.0, threadId * 104743.0 / 1039.0);\n  return fract(sin(dot(co, smallVal)) * bigVal);\n}\n\nvec4 luma2Alpha(vec4 color, float vmin, float vmax, float C) {\n  float x = dot(color.rgb, vec3(0.2125, 0.7154, 0.0721));\n  // float x = max(color[2], max(color[0],color[1]));\n  float xi = (x - vmin) / (vmax - vmin);\n  xi = clamp(xi, 0.0, 1.0);\n  float y = pow(xi, C);\n  y = clamp(y, 0.0, 1.0);\n  color[3] = y;\n  return color;\n}\n\nvec2 offsetFrontBack(float t) {\n  int a = int(t);\n  int ax = int(ATLAS_DIMS.x);\n  vec2 os = vec2(float(a - (a / ax) * ax), float(a / ax)) / ATLAS_DIMS;\n  return clamp(os, vec2(0.0), vec2(1.0) - vec2(1.0) / ATLAS_DIMS);\n}\n\nvec4 sampleAtlasLinear(sampler2D tex, vec4 pos) {\n  float bounds = float(pos[0] >= 0.0 && pos[0] <= 1.0 &&\n    pos[1] >= 0.0 && pos[1] <= 1.0 &&\n    pos[2] >= 0.0 && pos[2] <= 1.0);\n  float nSlices = float(SLICES);\n  // get location within atlas tile\n  // TODO: get loc1 which follows ray to next slice along ray direction\n  // when flipvolume = 1: pos\n  // when flipvolume = -1: 1-pos\n  vec2 loc0 = ((pos.xy - 0.5) * flipVolume.xy + 0.5) / ATLAS_DIMS;\n\n  // loc ranges from 0 to 1/ATLAS_DIMS\n  // shrink loc0 to within one half edge texel - so as not to sample across edges of tiles.\n  loc0 = vec2(0.5) / textureRes + loc0 * (vec2(1.0) - ATLAS_DIMS / textureRes);\n\n  // interpolate between two slices\n  float z = (pos.z) * (nSlices - 1.0);\n  float z0 = floor(z);\n  float t = z - z0; //mod(z, 1.0);\n  float z1 = min(z0 + 1.0, nSlices - 1.0);\n\n  // flipped:\n  if (flipVolume.z == -1.0) {\n    z0 = nSlices - z0 - 1.0;\n    z1 = nSlices - z1 - 1.0;\n    t = 1.0 - t;\n  }\n\n  // get slice offsets in texture atlas\n  vec2 o0 = offsetFrontBack(z0) + loc0;\n  vec2 o1 = offsetFrontBack(z1) + loc0;\n\n  vec4 slice0Color = texture2D(tex, o0);\n  vec4 slice1Color = texture2D(tex, o1);\n  // NOTE we could premultiply the mask in the fuse function,\n  // but that is slower to update the maskAlpha value than here in the shader.\n  // it is a memory vs perf tradeoff. Do users really need to update the maskAlpha at realtime speed?\n  float slice0Mask = texture2D(textureAtlasMask, o0).x;\n  float slice1Mask = texture2D(textureAtlasMask, o1).x;\n  // or use max for conservative 0 or 1 masking?\n  float maskVal = mix(slice0Mask, slice1Mask, t);\n  // take mask from 0..1 to alpha..1\n  maskVal = mix(maskVal, 1.0, maskAlpha);\n  vec4 retval = mix(slice0Color, slice1Color, t);\n  // only mask the rgb, not the alpha(?)\n  retval.rgb *= maskVal;\n  return bounds * retval;\n}\n\nvec4 sampleAtlasNearest(sampler2D tex, vec4 pos) {\n  float bounds = float(pos[0] >= 0.0 && pos[0] <= 1.0 &&\n    pos[1] >= 0.0 && pos[1] <= 1.0 &&\n    pos[2] >= 0.0 && pos[2] <= 1.0);\n  float nSlices = float(SLICES);\n\n  vec2 loc0 = ((pos.xy - 0.5) * flipVolume.xy + 0.5) / ATLAS_DIMS;\n\n  // No interpolation - sample just one slice at a pixel center.\n  // Ideally this would be accomplished in part by switching this texture to linear\n  // filtering, but three makes this difficult to do through a WebGLRenderTarget.\n  loc0 = floor(loc0 * textureRes) / textureRes;\n  loc0 += vec2(0.5) / textureRes;\n\n  float z = min(floor(pos.z * nSlices), nSlices - 1.0);\n\n  if (flipVolume.z == -1.0) {\n    z = nSlices - z - 1.0;\n  }\n\n  vec2 o = offsetFrontBack(z) + loc0;\n  vec4 voxelColor = texture2D(tex, o);\n\n  // Apply mask\n  float voxelMask = texture2D(textureAtlasMask, o).x;\n  voxelMask = mix(voxelMask, 1.0, maskAlpha);\n  voxelColor.rgb *= voxelMask;\n\n  return bounds * voxelColor;\n}\n\nbool intersectBox(\n  in vec3 r_o,\n  in vec3 r_d,\n  in vec3 boxMin,\n  in vec3 boxMax,\n  out float tnear,\n  out float tfar\n) {\n  // compute intersection of ray with all six bbox planes\n  vec3 invR = vec3(1.0, 1.0, 1.0) / r_d;\n  vec3 tbot = invR * (boxMin - r_o);\n  vec3 ttop = invR * (boxMax - r_o);\n\n  // re-order intersections to find smallest and largest on each axis\n  vec3 tmin = min(ttop, tbot);\n  vec3 tmax = max(ttop, tbot);\n\n  // find the largest tmin and the smallest tmax\n  float largest_tmin = max(max(tmin.x, tmin.y), tmin.z);\n  float smallest_tmax = min(min(tmax.x, tmax.y), tmax.z);\n\n  tnear = largest_tmin;\n  tfar = smallest_tmax;\n\n  // use >= here?\n  return (smallest_tmax > largest_tmin);\n}\n\nvec4 accumulate(vec4 col, float s, vec4 C) {\n  float stepScale = (1.0 - powf((1.0 - col.w), s));\n  col.w = stepScale;\n  col.xyz *= col.w;\n  col = clamp(col, 0.0, 1.0);\n\n  C = (1.0 - C.w) * col + C;\n  return C;\n}\n\nvec4 integrateVolume(\n  vec4 eye_o,\n  vec4 eye_d,\n  float tnear,\n  float tfar,\n  float clipNear,\n  float clipFar,\n  sampler2D textureAtlas\n) {\n  vec4 C = vec4(0.0);\n  // march along ray from front to back, accumulating color\n\n  // estimate step length\n  const int maxSteps = 512;\n  // modify the 3 components of eye_d by volume scale\n  float scaledSteps = float(BREAK_STEPS) * length((eye_d.xyz / volumeScale));\n  float csteps = clamp(float(scaledSteps), 1.0, float(maxSteps));\n  float invstep = (tfar - tnear) / csteps;\n  // special-casing the single slice to remove the random ray dither.\n  // this removes a Moire pattern visible in single slice images, which we want to view as 2D images as best we can.\n  float r = (SLICES == 1.0) ? 0.0 : rand(eye_d.xy);\n  // if ortho and clipped, make step size smaller so we still get same number of steps\n  float tstep = invstep * orthoThickness;\n  float tfarsurf = r * tstep;\n  float overflow = mod((tfarsurf - tfar), tstep); // random dithering offset\n  float t = tnear + overflow;\n  t += r * tstep; // random dithering offset\n  float tdist = 0.0;\n  int numSteps = 0;\n  vec4 pos, col;\n  // We need to be able to scale the alpha contrib with number of ray steps,\n  // in order to make the final color invariant to the step size(?)\n  // use maxSteps (a constant) as the numerator... Not sure if this is sound.\n  float s = 0.5 * float(maxSteps) / csteps;\n  for (int i = 0; i < maxSteps; i++) {\n    pos = eye_o + eye_d * t;\n    // !!! assume box bounds are -0.5 .. 0.5. pos = (pos-min)/(max-min)\n    // scaling is handled by model transform and already accounted for before we get here.\n    // AABB clip is independent of this and is only used to determine tnear and tfar.\n    pos.xyz = (pos.xyz - (-0.5)) / ((0.5) - (-0.5)); //0.5 * (pos + 1.0); // map position from [boxMin, boxMax] to [0, 1] coordinates\n\n    vec4 col = interpolationEnabled ? sampleAtlasLinear(textureAtlas, pos) : sampleAtlasNearest(textureAtlas, pos);\n\n    if (maxProject != 0) {\n      col.xyz *= BRIGHTNESS;\n      C = max(col, C);\n    } else {\n      col = luma2Alpha(col, GAMMA_MIN, GAMMA_MAX, GAMMA_SCALE);\n      col.xyz *= BRIGHTNESS;\n      // for practical use the density only matters for regular volume integration\n      col.w *= DENSITY;\n      C = accumulate(col, s, C);\n    }\n    t += tstep;\n    numSteps = i;\n\n    if (t > tfar || t > tnear + clipFar)\n      break;\n    if (C.w > 1.0)\n      break;\n  }\n\n  return C;\n}\n\nvoid main() {\n  gl_FragColor = vec4(0.0);\n  vec2 vUv = gl_FragCoord.xy / iResolution.xy;\n\n  vec3 eyeRay_o, eyeRay_d;\n\n  if (isOrtho == 0.0) {\n    // for perspective rays:\n    // world space camera coordinates\n    // transform to object space\n    eyeRay_o = (inverseModelViewMatrix * vec4(0.0, 0.0, 0.0, 1.0)).xyz;\n    eyeRay_d = normalize(pObj - eyeRay_o);\n  } else {\n    // for ortho rays:\n    float zDist = 2.0;\n    eyeRay_d = (inverseModelViewMatrix * vec4(0.0, 0.0, -zDist, 0.0)).xyz;\n    vec4 ray_o = vec4(2.0 * vUv - 1.0, 1.0, 1.0);\n    ray_o.xy *= orthoScale;\n    ray_o.x *= iResolution.x / iResolution.y;\n    eyeRay_o = (inverseModelViewMatrix * ray_o).xyz;\n  }\n\n  // -0.5..0.5 is full box. AABB_CLIP lets us clip to a box shaped ROI to look at\n  // I am applying it here at the earliest point so that the ray march does\n  // not waste steps. For general shaped ROI, this has to be handled more\n  // generally (obviously)\n  vec3 boxMin = AABB_CLIP_MIN;\n  vec3 boxMax = AABB_CLIP_MAX;\n\n  float tnear, tfar;\n  bool hit = intersectBox(eyeRay_o, eyeRay_d, boxMin, boxMax, tnear, tfar);\n\n  if (!hit) {\n    // return background color if ray misses the cube\n    // is this safe to do when there is other geometry / gObjects drawn?\n    gl_FragColor = vec4(0.0); //C1;//vec4(0.0);\n    return;\n  }\n\n  float clipNear = 0.0;//-(dot(eyeRay_o.xyz, eyeNorm) + dNear) / dot(eyeRay_d.xyz, eyeNorm);\n  float clipFar = 10000.0;//-(dot(eyeRay_o.xyz,-eyeNorm) + dFar ) / dot(eyeRay_d.xyz,-eyeNorm);\n\n  // Sample the depth/position texture\n  // If this is a depth texture, the r component is a depth value. If this is a position texture,\n  // the xyz components are a view space position and w is 1.0 iff there's a mesh at this fragment.\n  vec4 meshPosSample = texture2D(textureDepth, vUv);\n  // Note: we make a different check for whether a mesh is present with depth vs. position textures.\n  // Here's the check for depth textures:\n  bool hasDepthValue = usingPositionTexture == 0 && meshPosSample.r < 1.0;\n\n  // If there's a depth-contributing mesh at this fragment, we may need to terminate the ray early\n  if (hasDepthValue || (usingPositionTexture == 1 && meshPosSample.a > 0.0)) {\n    if (hasDepthValue) {\n      // We're working with a depth value, so we need to convert back to view space position\n      // Get a projection space position from depth and uv, and unproject back to view space\n      vec4 meshProj = vec4(vUv * 2.0 - 1.0, meshPosSample.r * 2.0 - 1.0, 1.0);\n      vec4 meshView = inverseProjMatrix * meshProj;\n      meshPosSample = vec4(meshView.xyz / meshView.w, 1.0);\n    }\n    // Transform the mesh position to object space\n    vec4 meshObj = inverseModelViewMatrix * meshPosSample;\n\n    // Derive a t value for the mesh intersection\n    // NOTE: divides by 0 when `eyeRay_d.z` is 0. Could be mitigated by picking another component\n    // to derive with when z is 0, but I found this was rare enough in practice to be acceptable.\n    float tMesh = (meshObj.z - eyeRay_o.z) / eyeRay_d.z;\n    if (tMesh < tfar) {\n      clipFar = tMesh - tnear;\n    }\n  }\n\n  //tnear and tfar are intersections of box\n  vec4 C = integrateVolume(vec4(eyeRay_o, 1.0), vec4(eyeRay_d, 0.0), tnear, tfar, clipNear, clipFar, textureAtlas);\n\n  C = clamp(C, 0.0, 1.0);\n  gl_FragColor = C;\n  return;\n}\n";
export const rayMarchingVertexShaderSrc = rayMarchVertexShader;
export const rayMarchingFragmentShaderSrc = rayMarchFragmentShader;
export const rayMarchingShaderUniforms = () => {
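Both ray-march sampling functions above (and the slice shader in the next file) locate a z-slice inside the 2D texture atlas with the same offsetFrontBack() tile math. As a reading aid, here is a minimal TypeScript transcription of that math; the function signature and tuple types are illustrative, not part of the package API.

// Illustrative transcription of the shaders' offsetFrontBack() tile math.
// `slice` is the z-slice index; `atlasDims` is the tile grid as [cols, rows].
function offsetFrontBack(slice: number, atlasDims: [number, number]): [number, number] {
  const [ax, ay] = atlasDims;
  const a = Math.floor(slice);
  // column = a mod cols, row = floor(a / cols), each normalized by the grid size
  const u = (a - Math.floor(a / ax) * ax) / ax;
  const v = Math.floor(a / ax) / ay;
  // clamp so the offset never addresses past the last tile
  return [Math.min(Math.max(u, 0), 1 - 1 / ax), Math.min(Math.max(v, 0), 1 - 1 / ay)];
}

// e.g. slice 5 in a 4x4 atlas lands at tile (1, 1): offsetFrontBack(5, [4, 4]) -> [0.25, 0.25]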
package/es/constants/volumeSliceShader.js
CHANGED
@@ -1,7 +1,7 @@
/* babel-plugin-inline-import './shaders/slice.vert' */
const sliceVertexShader = "precision highp float;\nprecision highp int;\n\nvarying vec2 vUv;\n\nvoid main() {\n  vUv = uv;\n  gl_Position = projectionMatrix *\n    modelViewMatrix *\n    vec4(position, 1.0);\n}\n";
/* babel-plugin-inline-import './shaders/slice.frag' */
- const sliceFragShader = "\n#ifdef GL_ES\nprecision highp float;\n#endif\n\nuniform vec2 textureRes;\nuniform float GAMMA_MIN;\nuniform float GAMMA_MAX;\nuniform float GAMMA_SCALE;\nuniform float BRIGHTNESS;\nuniform float DENSITY;\nuniform float maskAlpha;\nuniform vec2 ATLAS_DIMS;\nuniform vec3 AABB_CLIP_MIN;\nuniform vec3 AABB_CLIP_MAX;\nuniform sampler2D textureAtlas;\nuniform sampler2D textureAtlasMask;\nuniform int Z_SLICE;\nuniform float SLICES;\nuniform bool interpolationEnabled;\nuniform vec3 flipVolume;\n\nvarying vec2 vUv;\n\n// for atlased texture, we need to find the uv offset for the slice at t\nvec2 offsetFrontBack(float t) {\n  int a = int(t);\n  int ax = int(ATLAS_DIMS.x);\n  vec2 os = vec2(float(a - (a / ax) * ax), float(a / ax)) / ATLAS_DIMS;\n  return clamp(os, vec2(0.0), vec2(1.0) - vec2(1.0) / ATLAS_DIMS);\n}\n\nvec4 sampleAtlas(sampler2D tex, vec4 pos) {\n  float bounds = float(pos[0] >= 0.0 && pos[0] <= 1.0 &&\n    pos[1] >= 0.0 && pos[1] <= 1.0 &&\n    pos[2] >= 0.0 && pos[2] <= 1.0);\n\n  float nSlices = float(SLICES);\n\n  vec2 loc0 = ((pos.xy - 0.5) * flipVolume.xy + 0.5) / ATLAS_DIMS;\n\n
+ const sliceFragShader = "\n#ifdef GL_ES\nprecision highp float;\n#endif\n\nuniform vec2 textureRes;\nuniform float GAMMA_MIN;\nuniform float GAMMA_MAX;\nuniform float GAMMA_SCALE;\nuniform float BRIGHTNESS;\nuniform float DENSITY;\nuniform float maskAlpha;\nuniform vec2 ATLAS_DIMS;\nuniform vec3 AABB_CLIP_MIN;\nuniform vec3 AABB_CLIP_MAX;\nuniform sampler2D textureAtlas;\nuniform sampler2D textureAtlasMask;\nuniform int Z_SLICE;\nuniform float SLICES;\nuniform bool interpolationEnabled;\nuniform vec3 flipVolume;\n\nvarying vec2 vUv;\n\n// for atlased texture, we need to find the uv offset for the slice at t\nvec2 offsetFrontBack(float t) {\n  int a = int(t);\n  int ax = int(ATLAS_DIMS.x);\n  vec2 os = vec2(float(a - (a / ax) * ax), float(a / ax)) / ATLAS_DIMS;\n  return clamp(os, vec2(0.0), vec2(1.0) - vec2(1.0) / ATLAS_DIMS);\n}\n\nvec4 sampleAtlas(sampler2D tex, vec4 pos) {\n  float bounds = float(pos[0] >= 0.0 && pos[0] <= 1.0 &&\n    pos[1] >= 0.0 && pos[1] <= 1.0 &&\n    pos[2] >= 0.0 && pos[2] <= 1.0);\n\n  float nSlices = float(SLICES);\n\n  vec2 loc0 = ((pos.xy - 0.5) * flipVolume.xy + 0.5) / ATLAS_DIMS;\n\n  if (interpolationEnabled) {\n    // loc ranges from 0 to 1/ATLAS_DIMS\n    // shrink loc0 to within one half edge texel - so as not to sample across edges of tiles.\n    loc0 = loc0 * (vec2(1.0) - ATLAS_DIMS / textureRes);\n  } else {\n    // No interpolation - sample just one slice at a pixel center.\n    loc0 = floor(loc0 * textureRes) / textureRes;\n  }\n  loc0 += vec2(0.5) / textureRes;\n\n  float z = min(floor(pos.z * nSlices), nSlices - 1.0);\n\n  if (flipVolume.z == -1.0) {\n    z = nSlices - z - 1.0;\n  }\n\n  vec2 o = offsetFrontBack(z) + loc0;\n  vec4 voxelColor = texture2D(tex, o);\n\n  // Apply mask\n  float voxelMask = texture2D(textureAtlasMask, o).x;\n  voxelMask = mix(voxelMask, 1.0, maskAlpha);\n  voxelColor.rgb *= voxelMask;\n\n  return bounds * voxelColor;\n}\n\nvoid main() {\n  gl_FragColor = vec4(0.0);\n\n  vec3 boxMin = AABB_CLIP_MIN;\n  vec3 boxMax = AABB_CLIP_MAX;\n  // Normalize UV for [-0.5, 0.5] range\n  vec2 normUv = vUv - vec2(0.5);\n\n  // Return background color if outside of clipping box\n  if (normUv.x < boxMin.x || normUv.x > boxMax.x || normUv.y < boxMin.y || normUv.y > boxMax.y) {\n    gl_FragColor = vec4(0.0);\n    return;\n  }\n\n  // Normalize z-slice by total slices\n  vec4 pos = vec4(vUv, (SLICES == 1.0 && Z_SLICE == 0) ? 0.0 : float(Z_SLICE) / (SLICES - 1.0), 0.0);\n\n  vec4 C;\n  C = sampleAtlas(textureAtlas, pos);\n  C.xyz *= BRIGHTNESS;\n\n  C = clamp(C, 0.0, 1.0);\n  gl_FragColor = C;\n  return;\n}";
import { Vector2, Vector3, Matrix4, Texture } from "three";
export const sliceVertexShaderSrc = sliceVertexShader;
export const sliceFragmentShaderSrc = sliceFragShader;
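The rewritten sliceFragShader folds the interpolation toggle into a single sampleAtlas(): with interpolation on, the tile-local UV is shrunk by a half-texel margin so linear filtering cannot bleed across tile edges; with it off, the UV snaps to a texel center. A hedged TypeScript sketch of just that branch; the function name and parameters are illustrative, not part of the shader's API.

// Illustrative sketch of the interpolationEnabled branch in sampleAtlas().
function tileLocalUv(
  u: number, v: number,             // tile-local uv, each in [0, 1/atlasDim]
  texResX: number, texResY: number, // atlas texture resolution in texels
  atlasX: number, atlasY: number,   // atlas tile grid dimensions
  interpolate: boolean
): [number, number] {
  if (interpolate) {
    // shrink toward the tile interior so linear filtering never crosses tile edges
    u *= 1 - atlasX / texResX;
    v *= 1 - atlasY / texResY;
  } else {
    // snap to a texel boundary so exactly one texel is sampled
    u = Math.floor(u * texResX) / texResX;
    v = Math.floor(v * texResY) / texResY;
  }
  // in both cases, finish on a texel center
  return [u + 0.5 / texResX, v + 0.5 / texResY];
}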
package/es/loaders/JsonImageInfoLoader.js
CHANGED
@@ -3,6 +3,7 @@ import { ThreadableVolumeLoader } from "./IVolumeLoader.js";
import { computeAtlasSize } from "../ImageInfo.js";
import { isChunk } from "../VolumeCache.js";
import { getDataRange } from "../utils/num_utils.js";
+ import { remapUri } from "../utils/url_utils.js";

/* eslint-disable @typescript-eslint/naming-convention */

@@ -70,7 +71,7 @@ class JsonImageInfoLoader extends ThreadableVolumeLoader {
if (cachedInfo) {
return cachedInfo;
}
- const response = await fetch(this.urls[time]);
+ const response = await fetch(remapUri(this.urls[time]));
const imageInfo = await response.json();
imageInfo.pixel_size_unit = imageInfo.pixel_size_unit || "μm";
imageInfo.times = imageInfo.times || this.urls.length;
@@ -194,7 +195,7 @@ class JsonImageInfoLoader extends ThreadableVolumeLoader {
if (cacheHit) {
return;
}
- const response = await fetch(image.name, {
+ const response = await fetch(remapUri(image.name), {
mode: "cors"
});
const blob = await response.blob();
package/es/loaders/OmeZarrLoader.js
CHANGED
@@ -12,6 +12,7 @@ import { getScale, getSourceChannelMeta, matchSourceScaleLevels, orderByDimensio
import { VolumeLoadError, VolumeLoadErrorType, wrapVolumeLoadError } from "./VolumeLoadError.js";
import wrapArray, { RelaxedFetchStore } from "./zarr_utils/wrappers.js";
import { assertMetadataHasMultiscales, toOMEZarrMetaV4, validateOMEZarrMetadata } from "./zarr_utils/validation.js";
+ import { remapUri } from "../utils/url_utils.js";
const CHUNK_REQUEST_CANCEL_REASON = "chunk request cancelled";

// returns the converted data and the original min and max values
@@ -81,7 +82,7 @@ class OMEZarrLoader extends ThreadableVolumeLoader {
if (!queue) {
queue = new SubscribableRequestQueue(fetchOptions?.concurrencyLimit, fetchOptions?.prefetchConcurrencyLimit);
}
- const urlsArr = Array.isArray(urls) ? urls : [urls];
+ const urlsArr = (Array.isArray(urls) ? urls : [urls]).map(remapUri);
const scenesArr = Array.isArray(scenes) ? scenes : [scenes];

// Create one `ZarrSource` per URL
@@ -130,7 +131,13 @@ class OMEZarrLoader extends ThreadableVolumeLoader {
let channelCount = 0;
for (const s of sources) {
s.channelOffset = channelCount;
-
+ if (s.omeroMetadata !== undefined) {
+ channelCount += s.omeroMetadata.channels.length;
+ } else if (s.axesTCZYX[1] > -1) {
+ channelCount += s.scaleLevels[0].shape[s.axesTCZYX[1]];
+ } else {
+ channelCount += 1;
+ }
}
// Ensure the sizes of all sources' scale levels are matched up. See this function's docs for more.
matchSourceScaleLevels(sources);
@@ -358,10 +365,10 @@ class OMEZarrLoader extends ThreadableVolumeLoader {
});

// Get number of chunks per dimension in every source array
- const chunkDimsTCZYX = this.sources.map(
- const level =
+ const chunkDimsTCZYX = this.sources.map((source, sourceIndex) => {
+ const level = source.scaleLevels[scaleLevel];
const chunkDimsUnordered = level.shape.map((dim, idx) => Math.ceil(dim / level.chunks[idx]));
- return this.orderByTCZYX(chunkDimsUnordered, 1);
+ return this.orderByTCZYX(chunkDimsUnordered, 1, sourceIndex);
});

// `ChunkPrefetchIterator` yields chunk coordinates in order of roughly how likely they are to be loaded next
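The new channel-counting block above falls back through three cases: explicit omero channel metadata, then the length of the C axis in the highest-resolution scale level, then a single implicit channel. A standalone TypeScript sketch of the same fallback, with a hypothetical SourceLike type standing in for the loader's internal source objects:

// Sketch of the per-source channel-count fallback; SourceLike is a stand-in type.
type SourceLike = {
  omeroMetadata?: { channels: unknown[] };
  axesTCZYX: number[];                // index of each TCZYX axis in the data, or -1 if absent
  scaleLevels: { shape: number[] }[]; // scaleLevels[0] is the highest resolution
};

function countChannels(sources: SourceLike[]): number {
  let channelCount = 0;
  for (const s of sources) {
    if (s.omeroMetadata !== undefined) {
      // Prefer explicit omero channel metadata when present
      channelCount += s.omeroMetadata.channels.length;
    } else if (s.axesTCZYX[1] > -1) {
      // Otherwise read the size of the C axis from the highest-resolution level
      channelCount += s.scaleLevels[0].shape[s.axesTCZYX[1]];
    } else {
      // No C axis at all: treat the source as a single channel
      channelCount += 1;
    }
  }
  return channelCount;
}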
package/es/loaders/TiffLoader.js
CHANGED
@@ -4,6 +4,7 @@ import { ThreadableVolumeLoader, LoadSpec } from "./IVolumeLoader.js";
import { computePackedAtlasDims, MAX_ATLAS_EDGE } from "./VolumeLoaderUtils.js";
import { VolumeLoadError, VolumeLoadErrorType, wrapVolumeLoadError } from "./VolumeLoadError.js";
import { CImageInfo } from "../ImageInfo.js";
+ import { remapUri } from "../utils/url_utils.js";
function trimNull(xml) {
// trim trailing unicode zeros?
return xml && xml.trim().replace(/\0/g, "").trim();
@@ -92,7 +93,7 @@ const getPixelType = pxSize => pxSize === 1 ? "uint8" : pxSize === 2 ? "uint16"
class TiffLoader extends ThreadableVolumeLoader {
constructor(url) {
super();
- this.url = url;
+ this.url = url.map(remapUri);
}
async loadOmeDims() {
if (!this.dims) {
package/es/types/Line3d.d.ts
CHANGED
@@ -8,14 +8,38 @@ import BaseDrawableMeshObject from "./BaseDrawableMeshObject.js";
export default class Line3d extends BaseDrawableMeshObject implements IDrawableObject {
private lineMesh;
private bufferSize;
+ private lineMaterial;
+ private useVertexColors;
+ private useColorRamp;
+ private colorRampTexture;
constructor();
+ private updateVertexColorFlag;
/**
* Sets the color of the line material.
* @param color Base line color.
- * @param useVertexColors If true,
- * the per-vertex colors defined in the geometry (see `setLineVertexData`).
+ * @param useVertexColors If true, the line will multiply the base color with
+ * the per-vertex colors defined in the geometry (see `setLineVertexData`).
+ * Default is `false`.
*/
setColor(color: Color, useVertexColors?: boolean): void;
+ /**
+ * Returns a new DataTexture representing the color stops in LinearSRGB color
+ * space.
+ */
+ private static colorStopsToTexture;
+ /**
+ * Sets the color ramp used for coloring the line. Note that the color will be
+ * multiplied by the base color defined in `setColor()`.
+ * @param colorStops Array of hex color stop strings.
+ * @param useColorRamp If true, the line will use the color ramp for coloring.
+ * Default is `false`.
+ */
+ setColorRamp(colorStops: string[], useColorRamp?: boolean): void;
+ /**
+ * Sets the scaling parameters for how the color ramp is applied. The color
+ * ramp will be centered at `vertexOffset` and span `vertexScale` vertices.
+ */
+ setColorRampScale(vertexScale: number, vertexOffset: number): void;
/**
* Sets the opacity of the line material.
*
@@ -51,6 +75,18 @@ export default class Line3d extends BaseDrawableMeshObject implements IDrawableO
* length is not a multiple of 3.
*/
setLineVertexData(positions: Float32Array, colors?: Float32Array): void;
- /**
+ /**
+ * Number of line segments that should be visible.
+ * @deprecated Use `setVisibleSegmentsRange` instead.
+ */
setNumSegmentsVisible(segments: number): void;
+ /**
+ * Sets the range of line segments that are visible; line segments outside of
+ * the range will be hidden.
+ * @param startSegment Index of the segment at the start of the visible range
+ * (inclusive).
+ * @param endSegment Index of the segment at the end of the visible range
+ * (exclusive).
+ */
+ setVisibleSegmentsRange(startSegment: number, endSegment: number): void;
}
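Taken together, these declarations add a color-ramp mode and a segment-visibility window to Line3d. A hypothetical usage sketch; the import path is illustrative, but the calls match the declarations above:

import { Color } from "three";
import Line3d from "@aics/vole-core"; // hypothetical import; adjust to how Line3d is actually exported

const line = new Line3d();
line.setColor(new Color(0xffffff));                         // base color, multiplied into ramp/vertex colors
line.setColorRamp(["#ff0000", "#00ff00", "#0000ff"], true); // color along the line from a ramp
line.setColorRampScale(100, 50);                            // ramp spans 100 vertices, centered on vertex 50
line.setVisibleSegmentsRange(0, 25);                        // show only segments [0, 25)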
package/es/types/SubrangeLineMaterial.d.ts
ADDED
@@ -0,0 +1,33 @@
+ import { type Texture } from "three";
+ import { LineMaterial, type LineMaterialParameters } from "three/addons/lines/LineMaterial";
+ type SubrangeLineMaterialParameters = LineMaterialParameters & {
+ minInstance?: number;
+ };
+ /**
+ * Replacement for LineMaterial with custom vertex shader to support showing
+ * only a subrange of line segments. Use with `instanceCount` on the geometry
+ * and the `minInstance` uniform to control the visible range.
+ */
+ export default class SubrangeLineMaterial extends LineMaterial {
+ constructor(params?: SubrangeLineMaterialParameters);
+ /**
+ * The minimum instance index to render, inclusive. Instances below this index
+ * will not be visible. Use with `instanceCount` on the geometry to show a
+ * subrange of line segments.
+ */
+ set minInstance(value: number);
+ set useColorRamp(value: boolean);
+ set colorRamp(value: Texture);
+ /** The number of vertices that the color ramp spans. */
+ set colorRampVertexScale(value: number);
+ /**
+ * The vertex index that will be assigned the middle of the color ramp. Vertex
+ * indices start at 0 for the first vertex in the line segments geometry.
+ *
+ * For example, if the color ramp spans 10 vertices, setting
+ * `colorRampVertexOffset` to 5 will center the color ramp on the 5th vertex,
+ * with the starting color at vertex 0 and the ending color at vertex 10.
+ */
+ set colorRampVertexOffset(value: number);
+ }
+ export {};
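As the comments above describe, the visible subrange is the combination of minInstance on the material and instanceCount on the geometry. A hypothetical sketch of how the two fit together with three's fat-line classes; the vole-core import path is illustrative:

import { LineSegmentsGeometry } from "three/addons/lines/LineSegmentsGeometry.js";
import { LineSegments2 } from "three/addons/lines/LineSegments2.js";
import SubrangeLineMaterial from "@aics/vole-core"; // hypothetical import path

const geometry = new LineSegmentsGeometry();
// Two segments: (0,0,0)-(1,0,0) and (1,0,0)-(1,1,0)
geometry.setPositions(new Float32Array([0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0]));
const material = new SubrangeLineMaterial({ linewidth: 2 });
// Show only instances (segments) in [1, 2): hide segment 0, draw segment 1.
material.minInstance = 1;
geometry.instanceCount = 2;
const line = new LineSegments2(geometry, material);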
package/es/utils/url_utils.js
ADDED
@@ -0,0 +1,25 @@
+ const S3_URL_PREFIX = "s3://";
+ const GCS_URL_PREFIX = "gs://";
+ const VAST_FILES_PREFIX = "/allen/aics/";
+ const VAST_FILES_URL = "https://vast-files.int.allencell.org/";
+
+ /**
+ * Remaps non-standard URIs (e.g. S3 (`s3://`), Google Cloud Storage (`gs://`), or
+ * VAST files (`/allen/aics/`)) to a standard HTTPS URL.
+ */
+ export function remapUri(url) {
+ let newUrl = url.trim();
+ if (newUrl.startsWith(S3_URL_PREFIX)) {
+ // remap s3://bucket/key to https://bucket.s3.amazonaws.com/key
+ const s3Path = newUrl.slice(S3_URL_PREFIX.length);
+ const pathSegments = s3Path.split("/");
+ newUrl = `https://${pathSegments[0]}.s3.amazonaws.com/${pathSegments.slice(1).join("/")}`;
+ } else if (newUrl.startsWith(GCS_URL_PREFIX)) {
+ // remap gs://bucket/key to https://storage.googleapis.com/bucket/key
+ newUrl = newUrl.replace(GCS_URL_PREFIX, "https://storage.googleapis.com/");
+ } else if (newUrl.startsWith(VAST_FILES_PREFIX)) {
+ // remap /allen/aics/... to https://vast-files.int.allencell.org/...
+ newUrl = newUrl.replace(VAST_FILES_PREFIX, VAST_FILES_URL);
+ }
+ return newUrl;
+ }
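For reference, tracing remapUri() through its three branches on hypothetical inputs (the import path is illustrative):

import { remapUri } from "@aics/vole-core"; // hypothetical import path

remapUri("s3://my-bucket/data/volume.zarr"); // -> "https://my-bucket.s3.amazonaws.com/data/volume.zarr"
remapUri("gs://my-bucket/data/volume.zarr"); // -> "https://storage.googleapis.com/my-bucket/data/volume.zarr"
remapUri("/allen/aics/some/volume.json");    // -> "https://vast-files.int.allencell.org/some/volume.json"
remapUri("https://example.com/vol.zarr");    // -> unchanged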
package/package.json
CHANGED
@@ -1,6 +1,6 @@
{
"name": "@aics/vole-core",
- "version": "4.4.1",
+ "version": "4.6.2",
"description": "volume renderer for 3d, 4d, or 5d imaging data with OME-Zarr support",
"main": "es/index.js",
"type": "module",
@@ -33,6 +33,10 @@
"publishConfig": {
"access": "public"
},
+ "repository": {
+ "type": "git",
+ "url": "git+https://github.com/allen-cell-animated/vole-core.git"
+ },
"dependencies": {
"@babel/runtime": "^7.25.6",
"geotiff": "^2.0.5",