@aics/vole-core 3.12.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (141)
  1. package/LICENSE.txt +26 -0
  2. package/README.md +119 -0
  3. package/es/Atlas2DSlice.js +224 -0
  4. package/es/Channel.js +264 -0
  5. package/es/FileSaver.js +31 -0
  6. package/es/FusedChannelData.js +192 -0
  7. package/es/Histogram.js +250 -0
  8. package/es/ImageInfo.js +127 -0
  9. package/es/Light.js +74 -0
  10. package/es/Lut.js +500 -0
  11. package/es/MarchingCubes.js +507 -0
  12. package/es/MeshVolume.js +334 -0
  13. package/es/NaiveSurfaceNets.js +251 -0
  14. package/es/PathTracedVolume.js +482 -0
  15. package/es/RayMarchedAtlasVolume.js +250 -0
  16. package/es/RenderToBuffer.js +31 -0
  17. package/es/ThreeJsPanel.js +633 -0
  18. package/es/Timing.js +28 -0
  19. package/es/TrackballControls.js +538 -0
  20. package/es/View3d.js +848 -0
  21. package/es/Volume.js +352 -0
  22. package/es/VolumeCache.js +161 -0
  23. package/es/VolumeDims.js +16 -0
  24. package/es/VolumeDrawable.js +702 -0
  25. package/es/VolumeMaker.js +101 -0
  26. package/es/VolumeRenderImpl.js +1 -0
  27. package/es/VolumeRenderSettings.js +203 -0
  28. package/es/constants/basicShaders.js +29 -0
  29. package/es/constants/colors.js +59 -0
  30. package/es/constants/denoiseShader.js +43 -0
  31. package/es/constants/lights.js +42 -0
  32. package/es/constants/materials.js +85 -0
  33. package/es/constants/pathtraceOutputShader.js +13 -0
  34. package/es/constants/scaleBarSVG.js +21 -0
  35. package/es/constants/time.js +34 -0
  36. package/es/constants/volumePTshader.js +153 -0
  37. package/es/constants/volumeRayMarchShader.js +123 -0
  38. package/es/constants/volumeSliceShader.js +115 -0
  39. package/es/index.js +21 -0
  40. package/es/loaders/IVolumeLoader.js +131 -0
  41. package/es/loaders/JsonImageInfoLoader.js +255 -0
  42. package/es/loaders/OmeZarrLoader.js +495 -0
  43. package/es/loaders/OpenCellLoader.js +65 -0
  44. package/es/loaders/RawArrayLoader.js +89 -0
  45. package/es/loaders/TiffLoader.js +219 -0
  46. package/es/loaders/VolumeLoadError.js +44 -0
  47. package/es/loaders/VolumeLoaderUtils.js +221 -0
  48. package/es/loaders/index.js +40 -0
  49. package/es/loaders/zarr_utils/ChunkPrefetchIterator.js +143 -0
  50. package/es/loaders/zarr_utils/WrappedStore.js +51 -0
  51. package/es/loaders/zarr_utils/types.js +24 -0
  52. package/es/loaders/zarr_utils/utils.js +225 -0
  53. package/es/loaders/zarr_utils/validation.js +49 -0
  54. package/es/test/ChunkPrefetchIterator.test.js +208 -0
  55. package/es/test/RequestQueue.test.js +442 -0
  56. package/es/test/SubscribableRequestQueue.test.js +244 -0
  57. package/es/test/VolumeCache.test.js +118 -0
  58. package/es/test/VolumeRenderSettings.test.js +71 -0
  59. package/es/test/lut.test.js +671 -0
  60. package/es/test/num_utils.test.js +140 -0
  61. package/es/test/volume.test.js +98 -0
  62. package/es/test/zarr_utils.test.js +358 -0
  63. package/es/types/Atlas2DSlice.d.ts +41 -0
  64. package/es/types/Channel.d.ts +44 -0
  65. package/es/types/FileSaver.d.ts +6 -0
  66. package/es/types/FusedChannelData.d.ts +26 -0
  67. package/es/types/Histogram.d.ts +57 -0
  68. package/es/types/ImageInfo.d.ts +87 -0
  69. package/es/types/Light.d.ts +27 -0
  70. package/es/types/Lut.d.ts +67 -0
  71. package/es/types/MarchingCubes.d.ts +53 -0
  72. package/es/types/MeshVolume.d.ts +40 -0
  73. package/es/types/NaiveSurfaceNets.d.ts +11 -0
  74. package/es/types/PathTracedVolume.d.ts +65 -0
  75. package/es/types/RayMarchedAtlasVolume.d.ts +41 -0
  76. package/es/types/RenderToBuffer.d.ts +17 -0
  77. package/es/types/ThreeJsPanel.d.ts +107 -0
  78. package/es/types/Timing.d.ts +11 -0
  79. package/es/types/TrackballControls.d.ts +51 -0
  80. package/es/types/View3d.d.ts +357 -0
  81. package/es/types/Volume.d.ts +152 -0
  82. package/es/types/VolumeCache.d.ts +43 -0
  83. package/es/types/VolumeDims.d.ts +28 -0
  84. package/es/types/VolumeDrawable.d.ts +108 -0
  85. package/es/types/VolumeMaker.d.ts +49 -0
  86. package/es/types/VolumeRenderImpl.d.ts +22 -0
  87. package/es/types/VolumeRenderSettings.d.ts +98 -0
  88. package/es/types/constants/basicShaders.d.ts +4 -0
  89. package/es/types/constants/colors.d.ts +2 -0
  90. package/es/types/constants/denoiseShader.d.ts +40 -0
  91. package/es/types/constants/lights.d.ts +38 -0
  92. package/es/types/constants/materials.d.ts +20 -0
  93. package/es/types/constants/pathtraceOutputShader.d.ts +11 -0
  94. package/es/types/constants/scaleBarSVG.d.ts +2 -0
  95. package/es/types/constants/time.d.ts +19 -0
  96. package/es/types/constants/volumePTshader.d.ts +137 -0
  97. package/es/types/constants/volumeRayMarchShader.d.ts +117 -0
  98. package/es/types/constants/volumeSliceShader.d.ts +109 -0
  99. package/es/types/glsl.d.js +0 -0
  100. package/es/types/index.d.ts +28 -0
  101. package/es/types/loaders/IVolumeLoader.d.ts +113 -0
  102. package/es/types/loaders/JsonImageInfoLoader.d.ts +80 -0
  103. package/es/types/loaders/OmeZarrLoader.d.ts +87 -0
  104. package/es/types/loaders/OpenCellLoader.d.ts +9 -0
  105. package/es/types/loaders/RawArrayLoader.d.ts +33 -0
  106. package/es/types/loaders/TiffLoader.d.ts +45 -0
  107. package/es/types/loaders/VolumeLoadError.d.ts +18 -0
  108. package/es/types/loaders/VolumeLoaderUtils.d.ts +38 -0
  109. package/es/types/loaders/index.d.ts +22 -0
  110. package/es/types/loaders/zarr_utils/ChunkPrefetchIterator.d.ts +22 -0
  111. package/es/types/loaders/zarr_utils/WrappedStore.d.ts +24 -0
  112. package/es/types/loaders/zarr_utils/types.d.ts +94 -0
  113. package/es/types/loaders/zarr_utils/utils.d.ts +23 -0
  114. package/es/types/loaders/zarr_utils/validation.d.ts +7 -0
  115. package/es/types/test/ChunkPrefetchIterator.test.d.ts +1 -0
  116. package/es/types/test/RequestQueue.test.d.ts +1 -0
  117. package/es/types/test/SubscribableRequestQueue.test.d.ts +1 -0
  118. package/es/types/test/VolumeCache.test.d.ts +1 -0
  119. package/es/types/test/VolumeRenderSettings.test.d.ts +1 -0
  120. package/es/types/test/lut.test.d.ts +1 -0
  121. package/es/types/test/num_utils.test.d.ts +1 -0
  122. package/es/types/test/volume.test.d.ts +1 -0
  123. package/es/types/test/zarr_utils.test.d.ts +1 -0
  124. package/es/types/types.d.ts +115 -0
  125. package/es/types/utils/RequestQueue.d.ts +112 -0
  126. package/es/types/utils/SubscribableRequestQueue.d.ts +52 -0
  127. package/es/types/utils/num_utils.d.ts +43 -0
  128. package/es/types/workers/VolumeLoaderContext.d.ts +106 -0
  129. package/es/types/workers/types.d.ts +101 -0
  130. package/es/types/workers/util.d.ts +3 -0
  131. package/es/types.js +75 -0
  132. package/es/typings.d.js +0 -0
  133. package/es/utils/RequestQueue.js +267 -0
  134. package/es/utils/SubscribableRequestQueue.js +187 -0
  135. package/es/utils/num_utils.js +231 -0
  136. package/es/workers/FetchTiffWorker.js +153 -0
  137. package/es/workers/VolumeLoadWorker.js +129 -0
  138. package/es/workers/VolumeLoaderContext.js +271 -0
  139. package/es/workers/types.js +41 -0
  140. package/es/workers/util.js +8 -0
  141. package/package.json +83 -0
package/es/constants/volumePTshader.js ADDED
@@ -0,0 +1,153 @@
+ import { Texture, Vector2, Vector3, Vector4 } from "three";
+ import { Light, AREA_LIGHT, SKY_LIGHT } from "../Light.js";
+ /* babel-plugin-inline-import './shaders/pathtrace.frag' */
+ const pathTraceFragmentShader = "precision highp float;\nprecision highp int;\nprecision highp sampler2D;\nprecision highp sampler3D;\n\n#define PI (3.1415926535897932384626433832795)\n#define PI_OVER_2 (1.57079632679489661923)\n#define PI_OVER_4 (0.785398163397448309616)\n#define INV_PI (1.0/PI)\n#define INV_2_PI (0.5/PI)\n#define INV_4_PI (0.25/PI)\n\nconst vec3 BLACK = vec3(0,0,0);\nconst vec3 WHITE = vec3(1.0,1.0,1.0);\nconst int ShaderType_Brdf = 0;\nconst int ShaderType_Phase = 1;\nconst int ShaderType_Mixed = 2;\nconst float MAX_RAY_LEN = 1500000.0f;\n\nin vec2 vUv;\n\nstruct Camera {\n vec3 mFrom;\n vec3 mU, mV, mN;\n vec4 mScreen; // left, right, bottom, top\n vec2 mInvScreen; // 1/w, 1/h\n float mFocalDistance;\n float mApertureSize;\n float mIsOrtho; // 1 or 0\n};\n\nuniform Camera gCamera;\n\nstruct Light {\n float mTheta;\n float mPhi;\n float mWidth;\n float mHalfWidth;\n float mHeight;\n float mHalfHeight;\n float mDistance;\n float mSkyRadius;\n vec3 mP;\n vec3 mTarget;\n vec3 mN;\n vec3 mU;\n vec3 mV;\n float mArea;\n float mAreaPdf;\n vec3 mColor;\n vec3 mColorTop;\n vec3 mColorMiddle;\n vec3 mColorBottom;\n int mT;\n};\nconst int NUM_LIGHTS = 2;\nuniform Light gLights[2];\n\nuniform vec3 gClippedAaBbMin;\nuniform vec3 gClippedAaBbMax;\nuniform vec3 gVolCenter;\nuniform float gDensityScale;\nuniform float gStepSize;\nuniform float gStepSizeShadow;\nuniform sampler3D volumeTexture;\nuniform vec3 gInvAaBbMax;\nuniform int gNChannels;\nuniform int gShadingType;\nuniform vec3 gGradientDeltaX;\nuniform vec3 gGradientDeltaY;\nuniform vec3 gGradientDeltaZ;\nuniform float gInvGradientDelta;\nuniform float gGradientFactor;\nuniform float uShowLights;\nuniform vec3 flipVolume;\n\n// per channel\n// the luttexture is a 256x4 rgba texture\n// each row is a 256 element lookup table.\nuniform sampler2D gLutTexture;\nuniform vec4 gIntensityMax;\nuniform vec4 gIntensityMin;\nuniform float gOpacity[4];\nuniform vec3 gEmissive[4];\nuniform vec3 gDiffuse[4];\nuniform vec3 gSpecular[4];\nuniform float gGlossiness[4];\n\n// compositing / progressive render\nuniform float uFrameCounter;\nuniform float uSampleCounter;\nuniform vec2 uResolution;\nuniform sampler2D tPreviousTexture;\n\n// from iq https://www.shadertoy.com/view/4tXyWN\nfloat rand( inout uvec2 seed )\n{\n seed += uvec2(1);\n uvec2 q = 1103515245U * ( (seed >> 1U) ^ (seed.yx) );\n uint n = 1103515245U * ( (q.x) ^ (q.y >> 3U) );\n return float(n) * (1.0 / float(0xffffffffU));\n}\n\nvec3 XYZtoRGB(vec3 xyz) {\n return vec3(\n 3.240479f*xyz[0] - 1.537150f*xyz[1] - 0.498535f*xyz[2],\n -0.969256f*xyz[0] + 1.875991f*xyz[1] + 0.041556f*xyz[2],\n 0.055648f*xyz[0] - 0.204043f*xyz[1] + 1.057311f*xyz[2]\n );\n}\n\n// Used to convert from linear RGB to XYZ space\nconst mat3 RGB_2_XYZ = (mat3(\n 0.4124564, 0.3575761, 0.1804375,\n 0.2126729, 0.7151522, 0.0721750,\n 0.0193339, 0.1191920, 0.9503041\n));\nvec3 RGBtoXYZ(vec3 rgb) {\n return rgb * RGB_2_XYZ;\n}\n\nvec3 getUniformSphereSample(in vec2 U)\n{\n float z = 1.f - 2.f * U.x;\n float r = sqrt(max(0.f, 1.f - z*z));\n float phi = 2.f * PI * U.y;\n float x = r * cos(phi);\n float y = r * sin(phi);\n return vec3(x, y, z);\n}\n\nfloat SphericalPhi(in vec3 Wl)\n{\n float p = atan(Wl.z, Wl.x);\n return (p < 0.f) ? 
p + 2.f * PI : p;\n}\n\nfloat SphericalTheta(in vec3 Wl)\n{\n return acos(clamp(Wl.y, -1.f, 1.f));\n}\n\nbool SameHemisphere(in vec3 Ww1, in vec3 Ww2)\n{\n return (Ww1.z * Ww2.z) > 0.0f;\n}\n\nvec2 getConcentricDiskSample(in vec2 U)\n{\n float r, theta;\n // Map 0..1 to -1..1\n float sx = 2.0 * U.x - 1.0;\n float sy = 2.0 * U.y - 1.0;\n\n // Map square to (r,theta)\n\n // Handle degeneracy at the origin\n if (sx == 0.0 && sy == 0.0)\n {\n return vec2(0.0f, 0.0f);\n }\n\n // quadrants of disk\n if (sx >= -sy)\n {\n if (sx > sy)\n {\n r = sx;\n if (sy > 0.0)\n theta = sy/r;\n else\n theta = 8.0f + sy/r;\n }\n else\n {\n r = sy;\n theta = 2.0f - sx/r;\n }\n }\n else\n {\n if (sx <= sy)\n {\n r = -sx;\n theta = 4.0f - sy/r;\n }\n else\n {\n r = -sy;\n theta = 6.0f + sx/r;\n }\n }\n\n theta *= PI_OVER_4;\n\n return vec2(r*cos(theta), r*sin(theta));\n}\n\nvec3 getCosineWeightedHemisphereSample(in vec2 U)\n{\n vec2 ret = getConcentricDiskSample(U);\n return vec3(ret.x, ret.y, sqrt(max(0.f, 1.f - ret.x * ret.x - ret.y * ret.y)));\n}\n\nstruct Ray {\n vec3 m_O;\n vec3 m_D;\n float m_MinT, m_MaxT;\n};\n\nvec3 rayAt(Ray r, float t) {\n return r.m_O + t*r.m_D;\n}\n\nRay GenerateCameraRay(in Camera cam, in vec2 Pixel, in vec2 ApertureRnd)\n{\n // negating ScreenPoint.y flips the up/down direction. depends on whether you want pixel 0 at top or bottom\n // we could also have flipped mScreen and mInvScreen, or cam.mV?\n vec2 ScreenPoint = vec2(\n cam.mScreen.x + (cam.mInvScreen.x * Pixel.x),\n cam.mScreen.z + (cam.mInvScreen.y * Pixel.y)\n );\n vec3 dxy = (ScreenPoint.x * cam.mU) + (-ScreenPoint.y * cam.mV);\n\n // orthographic camera ray: start at (camera pos + screen point), go in direction N\n // perspective camera ray: start at camera pos, go in direction (N + screen point)\n vec3 RayO = cam.mFrom + cam.mIsOrtho * dxy;\n vec3 RayD = normalize(cam.mN + (1.0 - cam.mIsOrtho) * dxy);\n\n if (cam.mApertureSize != 0.0f)\n {\n vec2 LensUV = cam.mApertureSize * getConcentricDiskSample(ApertureRnd);\n\n vec3 LI = cam.mU * LensUV.x + cam.mV * LensUV.y;\n RayO += LI;\n RayD = normalize((RayD * cam.mFocalDistance) - LI);\n }\n\n return Ray(RayO, RayD, 0.0, MAX_RAY_LEN);\n}\n\nbool IntersectBox(in Ray R, out float pNearT, out float pFarT)\n{\n vec3 invR\t\t= vec3(1.0f, 1.0f, 1.0f) / R.m_D;\n vec3 bottomT\t\t= invR * (vec3(gClippedAaBbMin.x, gClippedAaBbMin.y, gClippedAaBbMin.z) - R.m_O);\n vec3 topT\t\t= invR * (vec3(gClippedAaBbMax.x, gClippedAaBbMax.y, gClippedAaBbMax.z) - R.m_O);\n vec3 minT\t\t= min(topT, bottomT);\n vec3 maxT\t\t= max(topT, bottomT);\n float largestMinT = max(max(minT.x, minT.y), max(minT.x, minT.z));\n float smallestMaxT = min(min(maxT.x, maxT.y), min(maxT.x, maxT.z));\n\n pNearT = largestMinT;\n pFarT\t= smallestMaxT;\n\n return smallestMaxT > largestMinT;\n}\n\n// assume volume is centered at 0,0,0 so p spans -bounds to + bounds\n// transform p to range from 0,0,0 to 1,1,1 for volume texture sampling.\n// optionally invert axes\nvec3 PtoVolumeTex(vec3 p) {\n vec3 uvw = (p - gVolCenter) * gInvAaBbMax + vec3(0.5, 0.5, 0.5);\n // if flipVolume = 1, uvw is unchanged.\n // if flipVolume = -1, uvw = 1 - uvw\n uvw = (flipVolume*(uvw - 0.5) + 0.5);\n return uvw;\n}\n\nconst float UINT8_MAX = 1.0;//255.0;\n\n// strategy: sample up to 4 channels, and take the post-LUT maximum intensity as the channel that wins\n// we will return the unmapped raw intensity value from the volume so that other luts can be applied again later.\nfloat GetNormalizedIntensityMax4ch(in vec3 P, out int ch)\n{\n vec4 
intensity = UINT8_MAX * texture(volumeTexture, PtoVolumeTex(P));\n\n //intensity = (intensity - gIntensityMin) / (gIntensityMax - gIntensityMin);\n vec4 ilut = vec4(0.0, 0.0, 0.0, 0.0);\n // w in the lut texture is \"opacity\"\n ilut.x = texture(gLutTexture, vec2(intensity.x, 0.5/4.0)).w / 255.0;\n ilut.y = texture(gLutTexture, vec2(intensity.y, 1.5/4.0)).w / 255.0;\n ilut.z = texture(gLutTexture, vec2(intensity.z, 2.5/4.0)).w / 255.0;\n ilut.w = texture(gLutTexture, vec2(intensity.w, 3.5/4.0)).w / 255.0;\n\n float maxIn = 0.0;\n float iOut = 0.0;\n ch = 0;\n for (int i = 0; i < min(gNChannels, 4); ++i) {\n if (ilut[i] > maxIn) {\n maxIn = ilut[i];\n ch = i;\n iOut = intensity[i];\n }\n }\n\n //return maxIn;\n return iOut;\n}\n\nfloat GetNormalizedIntensity4ch(vec3 P, int ch)\n{\n vec4 intensity = UINT8_MAX * texture(volumeTexture, PtoVolumeTex(P));\n // select channel\n float intensityf = intensity[ch];\n //intensityf = (intensityf - gIntensityMin[ch]) / (gIntensityMax[ch] - gIntensityMin[ch]);\n //intensityf = texture(gLutTexture, vec2(intensityf, (0.5+float(ch))/4.0)).x;\n\n return intensityf;\n}\n\n// note that gInvGradientDelta is maxpixeldim of volume\n// gGradientDeltaX,Y,Z is 1/X,Y,Z of volume\nvec3 Gradient4ch(vec3 P, int ch)\n{\n vec3 Gradient;\n\n Gradient.x = (GetNormalizedIntensity4ch(P + (gGradientDeltaX), ch) - GetNormalizedIntensity4ch(P - (gGradientDeltaX), ch)) * gInvGradientDelta;\n Gradient.y = (GetNormalizedIntensity4ch(P + (gGradientDeltaY), ch) - GetNormalizedIntensity4ch(P - (gGradientDeltaY), ch)) * gInvGradientDelta;\n Gradient.z = (GetNormalizedIntensity4ch(P + (gGradientDeltaZ), ch) - GetNormalizedIntensity4ch(P - (gGradientDeltaZ), ch)) * gInvGradientDelta;\n\n return Gradient;\n}\n\nfloat GetOpacity(float NormalizedIntensity, int ch)\n{\n // apply lut\n float o = texture(gLutTexture, vec2(NormalizedIntensity, (0.5+float(ch))/4.0)).w / 255.0;\n float Intensity = o * gOpacity[ch];\n return Intensity;\n}\n\nvec3 GetEmissionN(float NormalizedIntensity, int ch)\n{\n return gEmissive[ch];\n}\n\nvec3 GetDiffuseN(float NormalizedIntensity, int ch)\n{\n vec4 col = texture(gLutTexture, vec2(NormalizedIntensity, (0.5+float(ch))/4.0));\n //vec3 col = vec3(1.0, 1.0, 1.0);\n return col.xyz * gDiffuse[ch];\n}\n\nvec3 GetSpecularN(float NormalizedIntensity, int ch)\n{\n return gSpecular[ch];\n}\n\nfloat GetGlossinessN(float NormalizedIntensity, int ch)\n{\n return gGlossiness[ch];\n}\n\n// a bsdf sample, a sample on a light source, and a randomly chosen light index\nstruct LightingSample {\n float m_bsdfComponent;\n vec2 m_bsdfDir;\n vec2 m_lightPos;\n float m_lightComponent;\n float m_LightNum;\n};\n\nLightingSample LightingSample_LargeStep(inout uvec2 seed) {\n return LightingSample(\n rand(seed),\n vec2(rand(seed), rand(seed)),\n vec2(rand(seed), rand(seed)),\n rand(seed),\n rand(seed)\n );\n}\n\n// return a color xyz\nvec3 Light_Le(in Light light, in vec2 UV)\n{\n if (light.mT == 0)\n return RGBtoXYZ(light.mColor) / light.mArea;\n\n if (light.mT == 1)\n {\n if (UV.y > 0.0f)\n return RGBtoXYZ(mix(light.mColorMiddle, light.mColorTop, abs(UV.y)));\n else\n return RGBtoXYZ(mix(light.mColorMiddle, light.mColorBottom, abs(UV.y)));\n }\n\n return BLACK;\n}\n\n// return a color xyz\nvec3 Light_SampleL(in Light light, in vec3 P, out Ray Rl, out float Pdf, in LightingSample LS)\n{\n vec3 L = BLACK;\n Pdf = 0.0;\n vec3 Ro = vec3(0,0,0), Rd = vec3(0,0,1);\n if (light.mT == 0)\n {\n Ro = (light.mP + ((-0.5f + LS.m_lightPos.x) * light.mWidth * light.mU) + ((-0.5f + LS.m_lightPos.y) * 
light.mHeight * light.mV));\n Rd = normalize(P - Ro);\n L = dot(Rd, light.mN) > 0.0f ? Light_Le(light, vec2(0.0f)) : BLACK;\n Pdf = abs(dot(Rd, light.mN)) > 0.0f ? dot(P-Ro, P-Ro) / (abs(dot(Rd, light.mN)) * light.mArea) : 0.0f;\n }\n else if (light.mT == 1)\n {\n Ro = light.mP + light.mSkyRadius * getUniformSphereSample(LS.m_lightPos);\n Rd = normalize(P - Ro);\n L = Light_Le(light, vec2(1.0f) - 2.0f * LS.m_lightPos);\n Pdf = pow(light.mSkyRadius, 2.0f) / light.mArea;\n }\n\n Rl = Ray(Ro, Rd, 0.0f, length(P - Ro));\n\n return L;\n}\n\n// Intersect ray with light\nbool Light_Intersect(Light light, inout Ray R, out float T, out vec3 L, out float pPdf)\n{\n if (light.mT == 0)\n {\n // Compute projection\n float DotN = dot(R.m_D, light.mN);\n\n // Ray is coplanar with light surface\n if (DotN >= 0.0f)\n return false;\n\n // Compute hit distance\n T = (-light.mDistance - dot(R.m_O, light.mN)) / DotN;\n\n // Intersection is in ray's negative direction\n if (T < R.m_MinT || T > R.m_MaxT)\n return false;\n\n // Determine position on light\n vec3 Pl = rayAt(R, T);\n\n // Vector from point on area light to center of area light\n vec3 Wl = Pl - light.mP;\n\n // Compute texture coordinates\n vec2 UV = vec2(dot(Wl, light.mU), dot(Wl, light.mV));\n\n // Check if within bounds of light surface\n if (UV.x > light.mHalfWidth || UV.x < -light.mHalfWidth || UV.y > light.mHalfHeight || UV.y < -light.mHalfHeight)\n return false;\n\n R.m_MaxT = T;\n\n //pUV = UV;\n\n if (DotN < 0.0f)\n L = RGBtoXYZ(light.mColor) / light.mArea;\n else\n L = BLACK;\n\n pPdf = dot(R.m_O-Pl, R.m_O-Pl) / (DotN * light.mArea);\n\n return true;\n }\n\n else if (light.mT == 1)\n {\n T = light.mSkyRadius;\n\n // Intersection is in ray's negative direction\n if (T < R.m_MinT || T > R.m_MaxT)\n return false;\n\n R.m_MaxT = T;\n\n vec2 UV = vec2(SphericalPhi(R.m_D) * INV_2_PI, SphericalTheta(R.m_D) * INV_PI);\n\n L = Light_Le(light, vec2(1.0f,1.0f) - 2.0f * UV);\n\n pPdf = pow(light.mSkyRadius, 2.0f) / light.mArea;\n //pUV = UV;\n\n return true;\n }\n\n return false;\n}\n\nfloat Light_Pdf(in Light light, in vec3 P, in vec3 Wi)\n{\n vec3 L;\n vec2 UV;\n float Pdf = 1.0f;\n\n Ray Rl = Ray(P, Wi, 0.0f, 100000.0f);\n\n if (light.mT == 0)\n {\n float T = 0.0f;\n\n if (!Light_Intersect(light, Rl, T, L, Pdf))\n return 0.0f;\n\n return pow(T, 2.0f) / (abs(dot(light.mN, -Wi)) * light.mArea);\n }\n\n else if (light.mT == 1)\n {\n return pow(light.mSkyRadius, 2.0f) / light.mArea;\n }\n\n return 0.0f;\n}\n\nstruct VolumeShader {\n int m_Type; // 0 = bsdf, 1 = phase\n\n vec3 m_Kd; // isotropic phase // xyz color\n vec3 m_R; // specular reflectance\n float m_Ior;\n float m_Exponent;\n vec3 m_Nn;\n vec3 m_Nu;\n vec3 m_Nv;\n};\n\n// return a xyz color\nvec3 ShaderPhase_F(in VolumeShader shader, in vec3 Wo, in vec3 Wi)\n{\n return shader.m_Kd * INV_PI;\n}\n\nfloat ShaderPhase_Pdf(in VolumeShader shader, in vec3 Wo, in vec3 Wi)\n{\n return INV_4_PI;\n}\n\nvec3 ShaderPhase_SampleF(in VolumeShader shader, in vec3 Wo, out vec3 Wi, out float Pdf, in vec2 U)\n{\n Wi\t= getUniformSphereSample(U);\n Pdf\t= ShaderPhase_Pdf(shader, Wo, Wi);\n\n return ShaderPhase_F(shader, Wo, Wi);\n}\n\n// return a xyz color\nvec3 Lambertian_F(in VolumeShader shader, in vec3 Wo, in vec3 Wi)\n{\n return shader.m_Kd * INV_PI;\n}\n\nfloat Lambertian_Pdf(in VolumeShader shader, in vec3 Wo, in vec3 Wi)\n{\n //return abs(Wi.z)*INV_PI;\n return SameHemisphere(Wo, Wi) ? 
abs(Wi.z) * INV_PI : 0.0f;\n}\n\n// return a xyz color\nvec3 Lambertian_SampleF(in VolumeShader shader, in vec3 Wo, out vec3 Wi, out float Pdf, in vec2 U)\n{\n Wi = getCosineWeightedHemisphereSample(U);\n\n if (Wo.z < 0.0f)\n Wi.z *= -1.0f;\n\n Pdf = Lambertian_Pdf(shader, Wo, Wi);\n\n return Lambertian_F(shader, Wo, Wi);\n}\n\nvec3 SphericalDirection(in float SinTheta, in float CosTheta, in float Phi)\n{\n return vec3(SinTheta * cos(Phi), SinTheta * sin(Phi), CosTheta);\n}\n\nvoid Blinn_SampleF(in VolumeShader shader, in vec3 Wo, out vec3 Wi, out float Pdf, in vec2 U)\n{\n // Compute sampled half-angle vector wh for Blinn distribution\n float costheta = pow(U.x, 1.f / (shader.m_Exponent+1.0));\n float sintheta = sqrt(max(0.f, 1.f - costheta*costheta));\n float phi = U.y * 2.f * PI;\n\n vec3 wh = SphericalDirection(sintheta, costheta, phi);\n\n if (!SameHemisphere(Wo, wh))\n wh = -wh;\n\n // Compute incident direction by reflecting about wh\n Wi = -Wo + 2.f * dot(Wo, wh) * wh;\n\n // Compute PDF for wi from Blinn distribution\n float blinn_pdf = ((shader.m_Exponent + 1.f) * pow(costheta, shader.m_Exponent)) / (2.f * PI * 4.f * dot(Wo, wh));\n\n if (dot(Wo, wh) <= 0.f)\n blinn_pdf = 0.f;\n\n Pdf = blinn_pdf;\n}\n\nfloat Blinn_D(in VolumeShader shader, in vec3 wh)\n{\n float costhetah = abs(wh.z);//AbsCosTheta(wh);\n return (shader.m_Exponent+2.0) * INV_2_PI * pow(costhetah, shader.m_Exponent);\n}\nfloat Microfacet_G(in VolumeShader shader, in vec3 wo, in vec3 wi, in vec3 wh)\n{\n float NdotWh = abs(wh.z);//AbsCosTheta(wh);\n float NdotWo = abs(wo.z);//AbsCosTheta(wo);\n float NdotWi = abs(wi.z);//AbsCosTheta(wi);\n float WOdotWh = abs(dot(wo, wh));\n\n return min(1.f, min((2.f * NdotWh * NdotWo / WOdotWh), (2.f * NdotWh * NdotWi / WOdotWh)));\n}\n\nvec3 Microfacet_F(in VolumeShader shader, in vec3 wo, in vec3 wi)\n{\n float cosThetaO = abs(wo.z);//AbsCosTheta(wo);\n float cosThetaI = abs(wi.z);//AbsCosTheta(wi);\n\n if (cosThetaI == 0.f || cosThetaO == 0.f)\n return BLACK;\n\n vec3 wh = wi + wo;\n\n if (wh.x == 0. && wh.y == 0. 
&& wh.z == 0.)\n return BLACK;\n\n wh = normalize(wh);\n float cosThetaH = dot(wi, wh);\n\n vec3 F = WHITE;//m_Fresnel.Evaluate(cosThetaH);\n\n return shader.m_R * Blinn_D(shader, wh) * Microfacet_G(shader, wo, wi, wh) * F / (4.f * cosThetaI * cosThetaO);\n}\n\nvec3 ShaderBsdf_WorldToLocal(in VolumeShader shader, in vec3 W)\n{\n return vec3(dot(W, shader.m_Nu), dot(W, shader.m_Nv), dot(W, shader.m_Nn));\n}\n\nvec3 ShaderBsdf_LocalToWorld(in VolumeShader shader, in vec3 W)\n{\n return vec3(\tshader.m_Nu.x * W.x + shader.m_Nv.x * W.y + shader.m_Nn.x * W.z,\n shader.m_Nu.y * W.x + shader.m_Nv.y * W.y + shader.m_Nn.y * W.z,\n shader.m_Nu.z * W.x + shader.m_Nv.z * W.y + shader.m_Nn.z * W.z);\n}\n\nfloat Blinn_Pdf(in VolumeShader shader, in vec3 Wo, in vec3 Wi)\n{\n vec3 wh = normalize(Wo + Wi);\n\n float costheta = abs(wh.z);//AbsCosTheta(wh);\n // Compute PDF for wi from Blinn distribution\n float blinn_pdf = ((shader.m_Exponent + 1.f) * pow(costheta, shader.m_Exponent)) / (2.f * PI * 4.f * dot(Wo, wh));\n\n if (dot(Wo, wh) <= 0.0f)\n blinn_pdf = 0.0f;\n\n return blinn_pdf;\n}\n\nvec3 Microfacet_SampleF(in VolumeShader shader, in vec3 wo, out vec3 wi, out float Pdf, in vec2 U)\n{\n Blinn_SampleF(shader, wo, wi, Pdf, U);\n\n if (!SameHemisphere(wo, wi))\n return BLACK;\n\n return Microfacet_F(shader, wo, wi);\n}\n\nfloat Microfacet_Pdf(in VolumeShader shader, in vec3 wo, in vec3 wi)\n{\n if (!SameHemisphere(wo, wi))\n return 0.0f;\n\n return Blinn_Pdf(shader, wo, wi);\n}\n\n// return a xyz color\nvec3 ShaderBsdf_F(in VolumeShader shader, in vec3 Wo, in vec3 Wi)\n{\n vec3 Wol = ShaderBsdf_WorldToLocal(shader, Wo);\n vec3 Wil = ShaderBsdf_WorldToLocal(shader, Wi);\n\n vec3 R = vec3(0,0,0);\n\n R += Lambertian_F(shader, Wol, Wil);\n R += Microfacet_F(shader, Wol, Wil);\n\n return R;\n}\n\nfloat ShaderBsdf_Pdf(in VolumeShader shader, in vec3 Wo, in vec3 Wi)\n{\n vec3 Wol = ShaderBsdf_WorldToLocal(shader, Wo);\n vec3 Wil = ShaderBsdf_WorldToLocal(shader, Wi);\n\n float Pdf = 0.0f;\n\n Pdf += Lambertian_Pdf(shader, Wol, Wil);\n Pdf += Microfacet_Pdf(shader, Wol, Wil);\n\n return Pdf;\n}\n\n\nvec3 ShaderBsdf_SampleF(in VolumeShader shader, in LightingSample S, in vec3 Wo, out vec3 Wi, out float Pdf, in vec2 U)\n{\n vec3 Wol = ShaderBsdf_WorldToLocal(shader, Wo);\n vec3 Wil = vec3(0,0,0);\n\n vec3 R = vec3(0,0,0);\n\n if (S.m_bsdfComponent <= 0.5f)\n {\n Lambertian_SampleF(shader, Wol, Wil, Pdf, S.m_bsdfDir);\n }\n else\n {\n Microfacet_SampleF(shader, Wol, Wil, Pdf, S.m_bsdfDir);\n }\n\n Pdf += Lambertian_Pdf(shader, Wol, Wil);\n Pdf += Microfacet_Pdf(shader, Wol, Wil);\n\n R += Lambertian_F(shader, Wol, Wil);\n R += Microfacet_F(shader, Wol, Wil);\n\n Wi = ShaderBsdf_LocalToWorld(shader, Wil);\n\n //return vec3(1,1,1);\n return R;\n}\n\n// return a xyz color\nvec3 Shader_F(in VolumeShader shader, in vec3 Wo, in vec3 Wi)\n{\n if (shader.m_Type == 0) {\n return ShaderBsdf_F(shader, Wo, Wi);\n }\n else {\n return ShaderPhase_F(shader, Wo, Wi);\n }\n}\n\nfloat Shader_Pdf(in VolumeShader shader, in vec3 Wo, in vec3 Wi)\n{\n if (shader.m_Type == 0) {\n return ShaderBsdf_Pdf(shader, Wo, Wi);\n }\n else {\n return ShaderPhase_Pdf(shader, Wo, Wi);\n }\n}\n\nvec3 Shader_SampleF(in VolumeShader shader, in LightingSample S, in vec3 Wo, out vec3 Wi, out float Pdf, in vec2 U)\n{\n //return vec3(1,0,0);\n if (shader.m_Type == 0) {\n return ShaderBsdf_SampleF(shader, S, Wo, Wi, Pdf, U);\n }\n else {\n return ShaderPhase_SampleF(shader, Wo, Wi, Pdf, U);\n }\n}\n\n\nbool IsBlack(in vec3 v) {\n return (v.x==0.0 && v.y 
== 0.0 && v.z == 0.0);\n}\n\nfloat PowerHeuristic(float nf, float fPdf, float ng, float gPdf)\n{\n float f = nf * fPdf;\n float g = ng * gPdf;\n // The power heuristic is Veach's MIS balance heuristic except each component is being squared\n // balance heuristic would be f/(f+g) ...?\n return (f * f) / (f * f + g * g);\n}\n\nfloat MISContribution(float pdf1, float pdf2)\n{\n return PowerHeuristic(1.0f, pdf1, 1.0f, pdf2);\n}\n\n// \"shadow ray\" using gStepSizeShadow, test whether it can exit the volume or not\nbool DoesSecondaryRayScatterInVolume(inout Ray R, inout uvec2 seed)\n{\n float MinT;\n float MaxT;\n vec3 Ps;\n\n if (!IntersectBox(R, MinT, MaxT))\n return false;\n\n MinT = max(MinT, R.m_MinT);\n MaxT = min(MaxT, R.m_MaxT);\n\n // delta (Woodcock) tracking\n float S\t= -log(rand(seed)) / gDensityScale;\n float Sum = 0.0f;\n float SigmaT = 0.0f;\n\n MinT += rand(seed) * gStepSizeShadow;\n int ch = 0;\n float intensity = 0.0;\n while (Sum < S)\n {\n Ps = rayAt(R, MinT); // R.m_O + MinT * R.m_D;\n\n if (MinT > MaxT)\n return false;\n\n intensity = GetNormalizedIntensityMax4ch(Ps, ch);\n SigmaT = gDensityScale * GetOpacity(intensity, ch);\n\n Sum += SigmaT * gStepSizeShadow;\n MinT += gStepSizeShadow;\n }\n\n return true;\n}\n\nint GetNearestLight(Ray R, out vec3 oLightColor, out vec3 Pl, out float oPdf)\n{\n int hit = -1;\n float T = 0.0f;\n Ray rayCopy = R;\n float pdf = 0.0f;\n\n for (int i = 0; i < 2; i++)\n {\n if (Light_Intersect(gLights[i], rayCopy, T, oLightColor, pdf))\n {\n Pl = rayAt(R, T);\n hit = i;\n }\n }\n oPdf = pdf;\n\n return hit;\n}\n\n// return a XYZ color\n// Wo is direction from scatter point out toward incident ray direction\n\n// Wi goes toward light sample and is not necessarily perfect reflection of Wo\n// ^Wi ^N ^Wo\n// \\\\ | //\n// \\\\ | //\n// \\\\ | //\n// \\\\ | //\n// \\\\|// Pe = volume sample where scattering occurs\n// ---------\nvec3 EstimateDirectLight(int shaderType, float Density, int ch, in Light light, in LightingSample LS, in vec3 Wo, in vec3 Pe, in vec3 N, inout uvec2 seed)\n{\n vec3 Ld = BLACK, Li = BLACK, F = BLACK;\n\n vec3 diffuse = GetDiffuseN(Density, ch);\n vec3 specular = GetSpecularN(Density, ch);\n float glossiness = GetGlossinessN(Density, ch);\n\n // can N and Wo be coincident????\n vec3 nu = normalize(cross(N, Wo));\n vec3 nv = normalize(cross(N, nu));\n\n // the IoR here is hard coded... 
and unused!!!!\n VolumeShader Shader = VolumeShader(shaderType, RGBtoXYZ(diffuse), RGBtoXYZ(specular), 2.5f, glossiness, N, nu, nv);\n\n float LightPdf = 1.0f, ShaderPdf = 1.0f;\n\n Ray Rl = Ray(vec3(0,0,0), vec3(0,0,1.0), 0.0, MAX_RAY_LEN);\n // Rl is ray from light toward Pe in volume, with a max traversal of the distance from Pe to Light sample pos.\n Li = Light_SampleL(light, Pe, Rl, LightPdf, LS);\n\n // Wi: negate ray direction: from volume scatter point toward light...?\n vec3 Wi = -Rl.m_D, P = vec3(0,0,0);\n\n // we will calculate two lighting contributions and combine them by MIS.\n\n F = Shader_F(Shader,Wo, Wi);\n\n ShaderPdf = Shader_Pdf(Shader, Wo, Wi);\n\n // get a lighting contribution along Rl; see if Rl would scatter in the volume or not\n if (!IsBlack(Li) && (ShaderPdf > 0.0f) && (LightPdf > 0.0f) && !DoesSecondaryRayScatterInVolume(Rl, seed))\n {\n // ray from light can see through volume to Pe!\n\n float dotProd = 1.0;\n if (shaderType == ShaderType_Brdf){\n\n // (use abs or clamp here?)\n dotProd = abs(dot(Wi, N));\n }\n Ld += F * Li * dotProd * MISContribution(LightPdf, ShaderPdf) / LightPdf;\n\n }\n\n // get a lighting contribution by sampling nearest light from the scattering point\n F = Shader_SampleF(Shader, LS, Wo, Wi, ShaderPdf, LS.m_bsdfDir);\n if (!IsBlack(F) && (ShaderPdf > 0.0f))\n {\n vec3 Pl = vec3(0,0,0);\n int n = GetNearestLight(Ray(Pe, Wi, 0.0f, 1000000.0f), Li, Pl, LightPdf);\n if (n > -1)\n {\n Light pLight = gLights[n];\n LightPdf = Light_Pdf(pLight, Pe, Wi);\n\n if ((LightPdf > 0.0f) && !IsBlack(Li)) {\n Ray rr = Ray(Pl, normalize(Pe - Pl), 0.0f, length(Pe - Pl));\n if (!DoesSecondaryRayScatterInVolume(rr, seed))\n {\n float dotProd = 1.0;\n if (shaderType == ShaderType_Brdf){\n\n // (use abs or clamp here?)\n dotProd = abs(dot(Wi, N));\n }\n // note order of MIS params is swapped\n Ld += F * Li * dotProd * MISContribution(ShaderPdf, LightPdf) / ShaderPdf;\n }\n\n }\n }\n }\n\n return Ld;\n\n}\n\n// return a linear xyz color\nvec3 UniformSampleOneLight(int shaderType, float Density, int ch, in vec3 Wo, in vec3 Pe, in vec3 N, inout uvec2 seed)\n{\n //if (NUM_LIGHTS == 0)\n // return BLACK;\n\n // select a random light, a random 2d sample on light, and a random 2d sample on brdf\n LightingSample LS = LightingSample_LargeStep(seed);\n\n int WhichLight = int(floor(LS.m_LightNum * float(NUM_LIGHTS)));\n\n Light light = gLights[WhichLight];\n\n return float(NUM_LIGHTS) * EstimateDirectLight(shaderType, Density, ch, light, LS, Wo, Pe, N, seed);\n\n}\n\nbool SampleScatteringEvent(inout Ray R, inout uvec2 seed, out vec3 Ps)\n{\n float MinT;\n float MaxT;\n\n if (!IntersectBox(R, MinT, MaxT))\n return false;\n\n MinT = max(MinT, R.m_MinT);\n MaxT = min(MaxT, R.m_MaxT);\n\n // delta (Woodcock) tracking\n\n // notes, not necessarily coherent:\n // ray march along the ray's projected path and keep an average sigmaT value.\n // The distance is weighted by the intensity at each ray step sample. 
High intensity increases the apparent distance.\n // When the distance has become greater than the average sigmaT value given by -log(RandomFloat[0, 1]) / averageSigmaT\n // then that would be considered the interaction position.\n\n // sigmaT = sigmaA + sigmaS = absorption coeff + scattering coeff = extinction coeff\n\n // Beer-Lambert law: transmittance T(t) = exp(-sigmaT*t) where t is a distance!\n\n // importance sampling the exponential function to produce a free path distance S\n // the PDF is p(t) = sigmaT * exp(-sigmaT * t)\n // In a homogeneous volume,\n // S is the free-path distance = -ln(1-zeta)/sigmaT where zeta is a random variable\n // density scale = 0 => S --> 0..inf. Low density means randomly sized ray paths\n // density scale = inf => S --> 0. High density means short ray paths!\n\n // note that ln(x:0..1) is negative\n\n // here gDensityScale represents sigmaMax, a majorant of sigmaT\n // it is a parameter that should be set as close to the max extinction coefficient as possible.\n float S\t= -log(rand(seed)) / gDensityScale;\n\n float Sum\t\t= 0.0f;\n float SigmaT\t= 0.0f; // accumulated extinction along ray march\n\n // start: take one step now.\n MinT += rand(seed) * gStepSize;\n\n int ch = 0;\n float intensity = 0.0;\n\n // ray march until we have traveled S (or hit the maxT of the ray)\n while (Sum < S)\n {\n Ps = rayAt(R, MinT); // R.m_O + MinT * R.m_D;\n\n // if we exit the volume with no scattering\n if (MinT > MaxT)\n return false;\n\n intensity = GetNormalizedIntensityMax4ch(Ps, ch);\n SigmaT = gDensityScale * GetOpacity(intensity, ch);\n\n Sum += SigmaT * gStepSize;\n MinT += gStepSize;\n }\n\n // at this time, MinT - original MinT is the T transmission distance before a scatter event.\n // Ps is the point\n\n return true;\n}\n\n\nvec4 CalculateRadiance(inout uvec2 seed) {\n float r = rand(seed);\n //return vec4(r,0,0,1);\n\n vec3 Lv = BLACK, Li = BLACK;\n\n //Ray Re = Ray(vec3(0,0,0), vec3(0,0,1), 0.0, MAX_RAY_LEN);\n\n vec2 UV = vUv*uResolution + vec2(rand(seed), rand(seed));\n\n Ray Re = GenerateCameraRay(gCamera, UV, vec2(rand(seed), rand(seed)));\n\n //return vec4(vUv, 0.0, 1.0);\n //return vec4(0.5*(Re.m_D + 1.0), 1.0);\n //return vec4(Re.m_D, 1.0);\n\n //Re.m_MinT = 0.0f;\n //Re.m_MaxT = MAX_RAY_LEN;\n\n vec3 Pe = vec3(0,0,0), Pl = vec3(0,0,0);\n float lpdf = 0.0;\n\n float alpha = 0.0;\n // find point Pe along ray Re\n if (SampleScatteringEvent(Re, seed, Pe))\n {\n alpha = 1.0;\n // is there a light between Re.m_O and Pe? 
(ray's maxT is distance to Pe)\n // (test to see if area light was hit before volume.)\n int i = GetNearestLight(Ray(Re.m_O, Re.m_D, 0.0f, length(Pe - Re.m_O)), Li, Pl, lpdf);\n if (i > -1)\n {\n // set sample pixel value in frame estimate (prior to accumulation)\n return vec4(Li, 1.0);\n }\n\n int ch = 0;\n float D = GetNormalizedIntensityMax4ch(Pe, ch);\n\n // emission from volume\n Lv += RGBtoXYZ(GetEmissionN(D, ch));\n\n vec3 gradient = Gradient4ch(Pe, ch);\n // send ray out from Pe toward light\n switch (gShadingType)\n {\n case ShaderType_Brdf:\n {\n Lv += UniformSampleOneLight(ShaderType_Brdf, D, ch, normalize(-Re.m_D), Pe, normalize(gradient), seed);\n break;\n }\n\n case ShaderType_Phase:\n {\n Lv += 0.5f * UniformSampleOneLight(ShaderType_Phase, D, ch, normalize(-Re.m_D), Pe, normalize(gradient), seed);\n break;\n }\n\n case ShaderType_Mixed:\n {\n //const float GradMag = GradientMagnitude(Pe, volumedata.gradientVolumeTexture[ch]) * (1.0/volumedata.intensityMax[ch]);\n float GradMag = length(gradient);\n float PdfBrdf = (1.0f - exp(-gGradientFactor * GradMag));\n\n vec3 cls; // xyz color\n if (rand(seed) < PdfBrdf) {\n cls = UniformSampleOneLight(ShaderType_Brdf, D, ch, normalize(-Re.m_D), Pe, normalize(gradient), seed);\n }\n else {\n cls = 0.5f * UniformSampleOneLight(ShaderType_Phase, D, ch, normalize(-Re.m_D), Pe, normalize(gradient), seed);\n }\n\n Lv += cls;\n\n break;\n }\n }\n }\n else\n {\n // background color:\n // set Lv to a selected color based on environment light source?\n // if (uShowLights > 0.0) {\n // int n = GetNearestLight(Ray(Re.m_O, Re.m_D, 0.0f, 1000000.0f), Li, Pl, lpdf);\n // if (n > -1)\n // Lv = Li;\n // }\n //Lv = vec3(r,0,0);\n }\n\n // set sample pixel value in frame estimate (prior to accumulation)\n\n return vec4(Lv, alpha);\n}\n\nvec4 CumulativeMovingAverage(vec4 A, vec4 Ax, float N)\n{\n return A + ((Ax - A) / max((N), 1.0f));\n}\n\nvoid main()\n{\n // seed for rand(seed) function\n uvec2 seed = uvec2(uFrameCounter, uFrameCounter + 1.0) * uvec2(gl_FragCoord);\n\n // perform path tracing and get resulting pixel color\n vec4 pixelColor = CalculateRadiance( seed );\n\n vec4 previousColor = texture(tPreviousTexture, vUv);\n if (uSampleCounter < 1.0) {\n previousColor = vec4(0,0,0,0);\n }\n\n pc_fragColor = CumulativeMovingAverage(previousColor, pixelColor, uSampleCounter);\n}\n";
+ export const pathTracingFragmentShaderSrc = pathTraceFragmentShader;
+
+ // Must match values in shader code above.
+ const SHADERTYPE_BRDF = 0;
+ // const ShaderType_Phase = 1;
+ // const ShaderType_Mixed = 2;
+
+ export const pathTracingUniforms = () => {
+   return {
+     tPreviousTexture: {
+       type: "t",
+       value: new Texture()
+     },
+     uSampleCounter: {
+       type: "f",
+       value: 0.0
+     },
+     uFrameCounter: {
+       type: "f",
+       value: 1.0
+     },
+     uResolution: {
+       type: "v2",
+       value: new Vector2()
+     },
+     ///////////////////////////
+     gClippedAaBbMin: {
+       type: "v3",
+       value: new Vector3(0, 0, 0)
+     },
+     gClippedAaBbMax: {
+       type: "v3",
+       value: new Vector3(1, 1, 1)
+     },
+     gVolCenter: {
+       type: "v3",
+       value: new Vector3(0, 0, 0)
+     },
+     gDensityScale: {
+       type: "f",
+       value: 50.0
+     },
+     gStepSize: {
+       type: "f",
+       value: 1.0
+     },
+     gStepSizeShadow: {
+       type: "f",
+       value: 1.0
+     },
+     gInvAaBbMax: {
+       type: "v3",
+       value: new Vector3()
+     },
+     gNChannels: {
+       type: "i",
+       value: 0
+     },
+     gShadingType: {
+       type: "i",
+       value: SHADERTYPE_BRDF
+     },
+     gGradientDeltaX: {
+       type: "v3",
+       value: new Vector3(0.01, 0, 0)
+     },
+     gGradientDeltaY: {
+       type: "v3",
+       value: new Vector3(0, 0.01, 0)
+     },
+     gGradientDeltaZ: {
+       type: "v3",
+       value: new Vector3(0, 0, 0.01)
+     },
+     gInvGradientDelta: {
+       type: "f",
+       value: 0.0
+     },
+     // controls the amount of BRDF-like versus phase-function-like shading
+     gGradientFactor: {
+       type: "f",
+       value: 0.25
+     },
+     gCamera: {
+       value: {
+         // Camera struct
+         mFrom: new Vector3(),
+         mU: new Vector3(),
+         mV: new Vector3(),
+         mN: new Vector3(),
+         mScreen: new Vector4(),
+         // left, right, bottom, top
+         mInvScreen: new Vector2(),
+         // 1/w, 1/h
+         mFocalDistance: 0.0,
+         mApertureSize: 0.0,
+         mIsOrtho: 0.0
+       }
+     },
+     gLights: {
+       value: [new Light(SKY_LIGHT), new Light(AREA_LIGHT)]
+     },
+     volumeTexture: {
+       type: "t",
+       value: new Texture()
+     },
+     // per channel
+     gLutTexture: {
+       type: "t",
+       value: new Texture()
+     },
+     gIntensityMax: {
+       type: "v4",
+       value: new Vector4(1, 1, 1, 1)
+     },
+     gIntensityMin: {
+       type: "v4",
+       value: new Vector4(0, 0, 0, 0)
+     },
+     gOpacity: {
+       type: "1fv",
+       value: [1, 1, 1, 1]
+     },
+     gEmissive: {
+       type: "v3v",
+       value: [new Vector3(0, 0, 0), new Vector3(0, 0, 0), new Vector3(0, 0, 0), new Vector3(0, 0, 0)]
+     },
+     gDiffuse: {
+       type: "v3v",
+       value: [new Vector3(1, 0, 0), new Vector3(0, 1, 0), new Vector3(0, 0, 1), new Vector3(1, 0, 1)]
+     },
+     gSpecular: {
+       type: "v3v",
+       value: [new Vector3(0, 0, 0), new Vector3(0, 0, 0), new Vector3(0, 0, 0), new Vector3(0, 0, 0)]
+     },
+     gGlossiness: {
+       type: "1fv",
+       value: [1, 1, 1, 1]
+     },
+     uShowLights: {
+       type: "f",
+       value: 0
+     },
+     flipVolume: {
+       type: "v3",
+       value: new Vector3(1, 1, 1)
+     }
+   };
+ };
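The core of the path tracer above is delta (Woodcock) tracking: SampleScatteringEvent draws a free-path "budget" of -log(rand) / gDensityScale and marches the ray, accumulating the LUT-mapped opacity as extinction, until either the budget is spent (a scattering event) or the ray leaves the volume. A minimal single-ray JavaScript sketch of that loop, with sigmaAt used here as a stand-in (an assumption for illustration) for the shader's per-point opacity lookup:

// Delta (Woodcock) tracking, mirroring SampleScatteringEvent() in the shader above.
// sigmaAt(t) stands in for GetOpacity(GetNormalizedIntensityMax4ch(...)) at distance t.
function sampleScatterDistance(sigmaAt, densityScale, stepSize, maxT, rng) {
  const budget = -Math.log(rng()) / densityScale; // free-path length to "spend"
  let sum = 0.0;
  let t = rng() * stepSize; // jittered first step, as in the shader
  while (sum < budget) {
    if (t > maxT) return null; // exited the volume with no scattering
    sum += densityScale * sigmaAt(t) * stepSize; // accumulate extinction along the march
    t += stepSize;
  }
  return t; // distance along the ray at which a scattering event occurs
}

Each frame's noisy radiance estimate is then blended into the history buffer (tPreviousTexture) by CumulativeMovingAverage, driven by uSampleCounter; when the counter drops below 1 the history is discarded and accumulation restarts (for example after the camera moves). The same blending rule in plain JavaScript:

// CumulativeMovingAverage(A, Ax, N) from the shader's main():
// the new estimate ax is blended in with weight 1 / max(n, 1).
function cumulativeMovingAverage(a, ax, n) {
  return a + (ax - a) / Math.max(n, 1.0);
}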
package/es/constants/volumeRayMarchShader.js ADDED
@@ -0,0 +1,123 @@
+ import { Vector2, Vector3, Matrix4, Texture } from "three";
+ /* babel-plugin-inline-import './shaders/raymarch.vert' */
+ const rayMarchVertexShader = "// switch on high precision floats\n#ifdef GL_ES\nprecision highp float;\n#endif\n\nvarying vec3 pObj;\n\nvoid main() {\n pObj = position;\n gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);\n}\n";
+ /* babel-plugin-inline-import './shaders/raymarch.frag' */
+ const rayMarchFragmentShader = "\n#ifdef GL_ES\nprecision highp float;\n#endif\n\n#define M_PI 3.14159265358979323846\n\nuniform vec2 iResolution;\nuniform vec2 textureRes;\nuniform float GAMMA_MIN;\nuniform float GAMMA_MAX;\nuniform float GAMMA_SCALE;\nuniform float BRIGHTNESS;\nuniform float DENSITY;\nuniform float maskAlpha;\nuniform vec2 ATLAS_DIMS;\nuniform vec3 AABB_CLIP_MIN;\nuniform float CLIP_NEAR;\nuniform vec3 AABB_CLIP_MAX;\nuniform float CLIP_FAR;\nuniform sampler2D textureAtlas;\nuniform sampler2D textureAtlasMask;\nuniform sampler2D textureDepth;\nuniform int usingPositionTexture;\nuniform int BREAK_STEPS;\nuniform float SLICES;\nuniform float isOrtho;\nuniform float orthoThickness;\nuniform float orthoScale;\nuniform int maxProject;\nuniform bool interpolationEnabled;\nuniform vec3 flipVolume;\nuniform vec3 volumeScale;\n\n// view space to axis-aligned volume box\nuniform mat4 inverseModelViewMatrix;\nuniform mat4 inverseProjMatrix;\n\nvarying vec3 pObj;\n\nfloat powf(float a, float b) {\n return pow(a,b);\n}\n\nfloat rand(vec2 co) {\n float threadId = gl_FragCoord.x/(gl_FragCoord.y + 1.0);\n float bigVal = threadId*1299721.0/911.0;\n vec2 smallVal = vec2(threadId*7927.0/577.0, threadId*104743.0/1039.0);\n return fract(sin(dot(co, smallVal)) * bigVal);\n}\n\nvec4 luma2Alpha(vec4 color, float vmin, float vmax, float C) {\n float x = dot(color.rgb, vec3(0.2125, 0.7154, 0.0721));\n // float x = max(color[2], max(color[0],color[1]));\n float xi = (x-vmin)/(vmax-vmin);\n xi = clamp(xi,0.0,1.0);\n float y = pow(xi,C);\n y = clamp(y,0.0,1.0);\n color[3] = y;\n return color;\n}\n\nvec2 offsetFrontBack(float t) {\n int a = int(t);\n int ax = int(ATLAS_DIMS.x);\n vec2 os = vec2(float(a - (a / ax) * ax), float(a / ax)) / ATLAS_DIMS;\n return clamp(os, vec2(0.0), vec2(1.0) - vec2(1.0) / ATLAS_DIMS);\n}\n\nvec4 sampleAtlasLinear(sampler2D tex, vec4 pos) {\n float bounds = float(pos[0] >= 0.0 && pos[0] <= 1.0 &&\n pos[1] >= 0.0 && pos[1] <= 1.0 &&\n pos[2] >= 0.0 && pos[2] <= 1.0 );\n float nSlices = float(SLICES);\n // get location within atlas tile\n // TODO: get loc1 which follows ray to next slice along ray direction\n // when flipvolume = 1: pos\n // when flipvolume = -1: 1-pos\n vec2 loc0 = ((pos.xy - 0.5) * flipVolume.xy + 0.5) / ATLAS_DIMS;\n\n // loc ranges from 0 to 1/ATLAS_DIMS\n // shrink loc0 to within one half edge texel - so as not to sample across edges of tiles.\n loc0 = vec2(0.5) / textureRes + loc0 * (vec2(1.0) - ATLAS_DIMS / textureRes);\n \n // interpolate between two slices\n float z = (pos.z)*(nSlices-1.0);\n float z0 = floor(z);\n float t = z-z0; //mod(z, 1.0);\n float z1 = min(z0+1.0, nSlices-1.0);\n\n // flipped:\n if (flipVolume.z == -1.0) {\n z0 = nSlices - z0 - 1.0;\n z1 = nSlices - z1 - 1.0;\n t = 1.0 - t;\n }\n\n // get slice offsets in texture atlas\n vec2 o0 = offsetFrontBack(z0) + loc0;\n vec2 o1 = offsetFrontBack(z1) + loc0;\n\n vec4 slice0Color = texture2D(tex, o0);\n vec4 slice1Color = texture2D(tex, o1);\n // NOTE we could premultiply the mask in the fuse function,\n // but that is slower to update the maskAlpha value than here in the shader.\n // it is a memory vs perf tradeoff. 
Do users really need to update the maskAlpha at realtime speed?\n float slice0Mask = texture2D(textureAtlasMask, o0).x;\n float slice1Mask = texture2D(textureAtlasMask, o1).x;\n // or use max for conservative 0 or 1 masking?\n float maskVal = mix(slice0Mask, slice1Mask, t);\n // take mask from 0..1 to alpha..1\n maskVal = mix(maskVal, 1.0, maskAlpha);\n vec4 retval = mix(slice0Color, slice1Color, t);\n // only mask the rgb, not the alpha(?)\n retval.rgb *= maskVal;\n return bounds*retval;\n}\n\nvec4 sampleAtlasNearest(sampler2D tex, vec4 pos) {\n float bounds = float(pos[0] >= 0.0 && pos[0] <= 1.0 &&\n pos[1] >= 0.0 && pos[1] <= 1.0 &&\n pos[2] >= 0.0 && pos[2] <= 1.0 );\n float nSlices = float(SLICES);\n\n vec2 loc0 = ((pos.xy - 0.5) * flipVolume.xy + 0.5) / ATLAS_DIMS;\n\n // No interpolation - sample just one slice at a pixel center.\n // Ideally this would be accomplished in part by switching this texture to linear\n // filtering, but three makes this difficult to do through a WebGLRenderTarget.\n loc0 = floor(loc0 * textureRes) / textureRes;\n loc0 += vec2(0.5) / textureRes;\n\n float z = min(floor(pos.z * nSlices), nSlices-1.0);\n \n if (flipVolume.z == -1.0) {\n z = nSlices - z - 1.0;\n }\n\n vec2 o = offsetFrontBack(z) + loc0;\n vec4 voxelColor = texture2D(tex, o);\n\n // Apply mask\n float voxelMask = texture2D(textureAtlasMask, o).x;\n voxelMask = mix(voxelMask, 1.0, maskAlpha);\n voxelColor.rgb *= voxelMask;\n\n return bounds*voxelColor;\n}\n\nbool intersectBox(in vec3 r_o, in vec3 r_d, in vec3 boxMin, in vec3 boxMax,\n out float tnear, out float tfar) {\n // compute intersection of ray with all six bbox planes\n vec3 invR = vec3(1.0,1.0,1.0) / r_d;\n vec3 tbot = invR * (boxMin - r_o);\n vec3 ttop = invR * (boxMax - r_o);\n\n // re-order intersections to find smallest and largest on each axis\n vec3 tmin = min(ttop, tbot);\n vec3 tmax = max(ttop, tbot);\n\n // find the largest tmin and the smallest tmax\n float largest_tmin = max(max(tmin.x, tmin.y), max(tmin.x, tmin.z));\n float smallest_tmax = min(min(tmax.x, tmax.y), min(tmax.x, tmax.z));\n\n tnear = largest_tmin;\n tfar = smallest_tmax;\n\n // use >= here?\n return(smallest_tmax > largest_tmin);\n}\n\nvec4 accumulate(vec4 col, float s, vec4 C) {\n float stepScale = (1.0 - powf((1.0-col.w),s));\n col.w = stepScale;\n col.xyz *= col.w;\n col = clamp(col,0.0,1.0);\n\n C = (1.0-C.w)*col + C;\n return C;\n}\n\nvec4 integrateVolume(vec4 eye_o,vec4 eye_d,\n float tnear, float tfar,\n float clipNear, float clipFar,\n sampler2D textureAtlas\n ) {\n vec4 C = vec4(0.0);\n // march along ray from front to back, accumulating color\n\n // estimate step length\n const int maxSteps = 512;\n // modify the 3 components of eye_d by volume scale\n float scaledSteps = float(BREAK_STEPS) * length((eye_d.xyz/volumeScale));\n float csteps = clamp(float(scaledSteps), 1.0, float(maxSteps));\n float invstep = (tfar-tnear)/csteps;\n // special-casing the single slice to remove the random ray dither.\n // this removes a Moire pattern visible in single slice images, which we want to view as 2D images as best we can.\n float r = (SLICES==1.0) ? 
0.0 : rand(eye_d.xy);\n // if ortho and clipped, make step size smaller so we still get same number of steps\n float tstep = invstep*orthoThickness;\n float tfarsurf = r*tstep;\n float overflow = mod((tfarsurf - tfar),tstep); // random dithering offset\n float t = tnear + overflow;\n t += r*tstep; // random dithering offset\n float tdist = 0.0;\n int numSteps = 0;\n vec4 pos, col;\n // We need to be able to scale the alpha contrib with number of ray steps,\n // in order to make the final color invariant to the step size(?)\n // use maxSteps (a constant) as the numerator... Not sure if this is sound.\n float s = 0.5 * float(maxSteps) / csteps;\n for (int i = 0; i < maxSteps; i++) {\n pos = eye_o + eye_d*t;\n // !!! assume box bounds are -0.5 .. 0.5. pos = (pos-min)/(max-min)\n // scaling is handled by model transform and already accounted for before we get here.\n // AABB clip is independent of this and is only used to determine tnear and tfar.\n pos.xyz = (pos.xyz-(-0.5))/((0.5)-(-0.5)); //0.5 * (pos + 1.0); // map position from [boxMin, boxMax] to [0, 1] coordinates\n\n vec4 col = interpolationEnabled ? sampleAtlasLinear(textureAtlas, pos) : sampleAtlasNearest(textureAtlas, pos);\n\n if (maxProject != 0) {\n col.xyz *= BRIGHTNESS;\n C = max(col, C);\n } else {\n col = luma2Alpha(col, GAMMA_MIN, GAMMA_MAX, GAMMA_SCALE);\n col.xyz *= BRIGHTNESS;\n // for practical use the density only matters for regular volume integration\n col.w *= DENSITY;\n C = accumulate(col, s, C);\n }\n t += tstep;\n numSteps = i;\n\n if (t > tfar || t > tnear+clipFar ) break;\n if (C.w > 1.0 ) break;\n }\n\n return C;\n}\n\nvoid main() {\n gl_FragColor = vec4(0.0);\n vec2 vUv = gl_FragCoord.xy/iResolution.xy;\n\n vec3 eyeRay_o, eyeRay_d;\n\n if (isOrtho == 0.0) {\n // for perspective rays:\n // world space camera coordinates\n // transform to object space\n eyeRay_o = (inverseModelViewMatrix * vec4(0.0, 0.0, 0.0, 1.0)).xyz;\n eyeRay_d = normalize(pObj - eyeRay_o);\n } else {\n // for ortho rays:\n float zDist = 2.0;\n eyeRay_d = (inverseModelViewMatrix*vec4(0.0, 0.0, -zDist, 0.0)).xyz;\n vec4 ray_o = vec4(2.0*vUv - 1.0, 1.0, 1.0);\n ray_o.xy *= orthoScale;\n ray_o.x *= iResolution.x/iResolution.y;\n eyeRay_o = (inverseModelViewMatrix*ray_o).xyz;\n }\n\n // -0.5..0.5 is full box. AABB_CLIP lets us clip to a box shaped ROI to look at\n // I am applying it here at the earliest point so that the ray march does\n // not waste steps. For general shaped ROI, this has to be handled more\n // generally (obviously)\n vec3 boxMin = AABB_CLIP_MIN;\n vec3 boxMax = AABB_CLIP_MAX;\n\n float tnear, tfar;\n bool hit = intersectBox(eyeRay_o, eyeRay_d, boxMin, boxMax, tnear, tfar);\n\n if (!hit) {\n // return background color if ray misses the cube\n // is this safe to do when there is other geometry / gObjects drawn?\n gl_FragColor = vec4(0.0); //C1;//vec4(0.0);\n return;\n }\n\n float clipNear = 0.0;//-(dot(eyeRay_o.xyz, eyeNorm) + dNear) / dot(eyeRay_d.xyz, eyeNorm);\n float clipFar = 10000.0;//-(dot(eyeRay_o.xyz,-eyeNorm) + dFar ) / dot(eyeRay_d.xyz,-eyeNorm);\n\n // Sample the depth/position texture\n // If this is a depth texture, the r component is a depth value. If this is a position texture,\n // the xyz components are a view space position and w is 1.0 iff there's a mesh at this fragment.\n vec4 meshPosSample = texture2D(textureDepth, vUv);\n // Note: we make a different check for whether a mesh is present with depth vs. 
position textures.\n // Here's the check for depth textures:\n bool hasDepthValue = usingPositionTexture == 0 && meshPosSample.r < 1.0;\n\n // If there's a depth-contributing mesh at this fragment, we may need to terminate the ray early\n if (hasDepthValue || (usingPositionTexture == 1 && meshPosSample.a > 0.0)) {\n if (hasDepthValue) {\n // We're working with a depth value, so we need to convert back to view space position\n // Get a projection space position from depth and uv, and unproject back to view space\n vec4 meshProj = vec4(vUv * 2.0 - 1.0, meshPosSample.r * 2.0 - 1.0, 1.0);\n vec4 meshView = inverseProjMatrix * meshProj;\n meshPosSample = vec4(meshView.xyz / meshView.w, 1.0);\n }\n // Transform the mesh position to object space\n vec4 meshObj = inverseModelViewMatrix * meshPosSample;\n\n // Derive a t value for the mesh intersection\n // NOTE: divides by 0 when `eyeRay_d.z` is 0. Could be mitigated by picking another component\n // to derive with when z is 0, but I found this was rare enough in practice to be acceptable.\n float tMesh = (meshObj.z - eyeRay_o.z) / eyeRay_d.z;\n if (tMesh < tfar) {\n clipFar = tMesh - tnear;\n }\n }\n\n vec4 C = integrateVolume(vec4(eyeRay_o,1.0), vec4(eyeRay_d,0.0),\n tnear, tfar, //intersections of box\n clipNear, clipFar,\n textureAtlas);\n\n C = clamp(C, 0.0, 1.0);\n gl_FragColor = C;\n return;\n}\n";
+ export const rayMarchingVertexShaderSrc = rayMarchVertexShader;
+ export const rayMarchingFragmentShaderSrc = rayMarchFragmentShader;
+ export const rayMarchingShaderUniforms = () => {
+   return {
+     iResolution: {
+       type: "v2",
+       value: new Vector2(100, 100)
+     },
+     CLIP_NEAR: {
+       type: "f",
+       value: 0.1
+     },
+     CLIP_FAR: {
+       type: "f",
+       value: 20.0
+     },
+     maskAlpha: {
+       type: "f",
+       value: 1.0
+     },
+     BRIGHTNESS: {
+       type: "f",
+       value: 0.0
+     },
+     DENSITY: {
+       type: "f",
+       value: 1.0
+     },
+     GAMMA_MIN: {
+       type: "f",
+       value: 0.0
+     },
+     GAMMA_MAX: {
+       type: "f",
+       value: 1.0
+     },
+     GAMMA_SCALE: {
+       type: "f",
+       value: 1.0
+     },
+     BREAK_STEPS: {
+       type: "i",
+       value: 128
+     },
+     ATLAS_DIMS: {
+       type: "v2",
+       value: new Vector2(6, 6)
+     },
+     SLICES: {
+       type: "f",
+       value: 50
+     },
+     isOrtho: {
+       type: "f",
+       value: 0.0
+     },
+     orthoThickness: {
+       type: "f",
+       value: 1.0
+     },
+     orthoScale: {
+       type: "f",
+       value: 0.5 // needs to come from ThreeJsPanel's setting
+     },
+     AABB_CLIP_MIN: {
+       type: "v3",
+       value: new Vector3(-0.5, -0.5, -0.5)
+     },
+     AABB_CLIP_MAX: {
+       type: "v3",
+       value: new Vector3(0.5, 0.5, 0.5)
+     },
+     inverseModelViewMatrix: {
+       type: "m4",
+       value: new Matrix4()
+     },
+     inverseProjMatrix: {
+       type: "m4",
+       value: new Matrix4()
+     },
+     textureAtlas: {
+       type: "t",
+       value: new Texture()
+     },
+     textureAtlasMask: {
+       type: "t",
+       value: new Texture()
+     },
+     textureDepth: {
+       type: "t",
+       value: new Texture()
+     },
+     usingPositionTexture: {
+       type: "i",
+       value: 0
+     },
+     maxProject: {
+       type: "i",
+       value: 0
+     },
+     interpolationEnabled: {
+       type: "b",
+       value: true
+     },
+     flipVolume: {
+       type: "v3",
+       value: new Vector3(1.0, 1.0, 1.0)
+     },
+     volumeScale: {
+       type: "v3",
+       value: new Vector3(1.0, 1.0, 1.0)
+     },
+     textureRes: {
+       type: "v2",
+       value: new Vector2(1.0, 1.0)
+     }
+   };
+ };
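In the ray-march fragment shader above, BREAK_STEPS sets the nominal step count and accumulate() performs front-to-back "over" compositing, first rescaling each sample's alpha by 1 - (1 - a)^s so the accumulated opacity stays roughly independent of step size. A small JavaScript sketch of that compositing step, with plain {r, g, b, a} objects standing in for vec4:

// One front-to-back compositing step, mirroring accumulate() in the shader above.
// dst is the running ray color, sample is the current step's color, s is the
// step-count-dependent opacity scale. (The shader also clamps the source to [0, 1].)
function accumulateStep(dst, sample, s) {
  const a = 1.0 - Math.pow(1.0 - sample.a, s); // step-size opacity correction
  const src = { r: sample.r * a, g: sample.g * a, b: sample.b * a, a: a };
  return {
    r: dst.r + (1.0 - dst.a) * src.r,
    g: dst.g + (1.0 - dst.a) * src.g,
    b: dst.b + (1.0 - dst.a) * src.b,
    a: dst.a + (1.0 - dst.a) * src.a,
  };
}

The ray loop stops early once the accumulated alpha exceeds 1 or the ray passes tfar (or the mesh-derived clip distance), so fully opaque regions terminate the march.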
package/es/constants/volumeSliceShader.js ADDED
@@ -0,0 +1,115 @@
+ /* babel-plugin-inline-import './shaders/slice.vert' */
+ const sliceVertexShader = "precision highp float;\nprecision highp int;\n\nvarying vec2 vUv;\n\nvoid main() {\n vUv = uv;\n gl_Position = projectionMatrix *\n modelViewMatrix *\n vec4(position, 1.0);\n}\n";
+ /* babel-plugin-inline-import './shaders/slice.frag' */
+ const sliceFragShader = "\n#ifdef GL_ES\nprecision highp float;\n#endif\n\nuniform vec2 textureRes;\nuniform float GAMMA_MIN;\nuniform float GAMMA_MAX;\nuniform float GAMMA_SCALE;\nuniform float BRIGHTNESS;\nuniform float DENSITY;\nuniform float maskAlpha;\nuniform vec2 ATLAS_DIMS;\nuniform vec3 AABB_CLIP_MIN;\nuniform vec3 AABB_CLIP_MAX;\nuniform sampler2D textureAtlas;\nuniform sampler2D textureAtlasMask;\nuniform int Z_SLICE;\nuniform float SLICES;\nuniform bool interpolationEnabled;\nuniform vec3 flipVolume;\n\nvarying vec2 vUv;\n\n// for atlased texture, we need to find the uv offset for the slice at t\nvec2 offsetFrontBack(float t) {\n int a = int(t);\n int ax = int(ATLAS_DIMS.x);\n vec2 os = vec2(float(a - (a / ax) * ax), float(a / ax)) / ATLAS_DIMS;\n return clamp(os, vec2(0.0), vec2(1.0) - vec2(1.0) / ATLAS_DIMS);\n}\n\nvec4 sampleAtlas(sampler2D tex, vec4 pos) {\n float bounds = float(pos[0] >= 0.0 && pos[0] <= 1.0 &&\n pos[1] >= 0.0 && pos[1] <= 1.0 &&\n pos[2] >= 0.0 && pos[2] <= 1.0);\n\n float nSlices = float(SLICES);\n\n vec2 loc0 = ((pos.xy - 0.5) * flipVolume.xy + 0.5) / ATLAS_DIMS;\n\n\n if (interpolationEnabled) {\n // loc ranges from 0 to 1/ATLAS_DIMS\n // shrink loc0 to within one half edge texel - so as not to sample across edges of tiles.\n loc0 = loc0 * (vec2(1.0) - ATLAS_DIMS / textureRes);\n }\n else {\n // No interpolation - sample just one slice at a pixel center.\n loc0 = floor(loc0 * textureRes) / textureRes;\n }\n loc0 += vec2(0.5) / textureRes;\n\n float z = min(floor(pos.z * nSlices), nSlices - 1.0);\n\n if(flipVolume.z == -1.0) {\n z = nSlices - z - 1.0;\n }\n\n vec2 o = offsetFrontBack(z) + loc0;\n vec4 voxelColor = texture2D(tex, o);\n\n // Apply mask\n float voxelMask = texture2D(textureAtlasMask, o).x;\n voxelMask = mix(voxelMask, 1.0, maskAlpha);\n voxelColor.rgb *= voxelMask;\n\n return bounds * voxelColor;\n}\n\nvoid main() {\n gl_FragColor = vec4(0.0);\n\n vec3 boxMin = AABB_CLIP_MIN;\n vec3 boxMax = AABB_CLIP_MAX;\n // Normalize UV for [-0.5, 0.5] range\n vec2 normUv = vUv - vec2(0.5);\n\n // Return background color if outside of clipping box\n if(normUv.x < boxMin.x || normUv.x > boxMax.x || normUv.y < boxMin.y || normUv.y > boxMax.y) {\n gl_FragColor = vec4(0.0);\n return;\n }\n\n // Normalize z-slice by total slices\n vec4 pos = vec4(vUv, \n (SLICES==1.0 && Z_SLICE==0) ? 0.0 : float(Z_SLICE) / (SLICES - 1.0), \n 0.0);\n\n vec4 C;\n C = sampleAtlas(textureAtlas, pos);\n C.xyz *= BRIGHTNESS;\n\n C = clamp(C, 0.0, 1.0);\n gl_FragColor = C;\n return;\n}";
+ import { Vector2, Vector3, Matrix4, Texture } from "three";
+ export const sliceVertexShaderSrc = sliceVertexShader;
+ export const sliceFragmentShaderSrc = sliceFragShader;
+ export const sliceShaderUniforms = () => {
+ return {
+ iResolution: {
+ type: "v2",
+ value: new Vector2(100, 100)
+ },
+ CLIP_NEAR: {
+ type: "f",
+ value: 0.0
+ },
+ CLIP_FAR: {
+ type: "f",
+ value: 10000.0
+ },
+ maskAlpha: {
+ type: "f",
+ value: 1.0
+ },
+ BRIGHTNESS: {
+ type: "f",
+ value: 0.0
+ },
+ DENSITY: {
+ type: "f",
+ value: 1.0
+ },
+ GAMMA_MIN: {
+ type: "f",
+ value: 0.0
+ },
+ GAMMA_MAX: {
+ type: "f",
+ value: 1.0
+ },
+ GAMMA_SCALE: {
+ type: "f",
+ value: 1.0
+ },
+ BREAK_STEPS: {
+ type: "i",
+ value: 128
+ },
+ ATLAS_DIMS: {
+ type: "v2",
+ value: new Vector2(6, 6)
+ },
+ Z_SLICE: {
+ type: "i",
+ value: 0
+ },
+ SLICES: {
+ type: "f",
+ value: 50
+ },
+ isOrtho: {
+ type: "f",
+ value: 0.0
+ },
+ orthoThickness: {
+ type: "f",
+ value: 1.0
+ },
+ orthoScale: {
+ type: "f",
+ value: 0.5 // needs to come from ThreeJsPanel's setting
+ },
+ AABB_CLIP_MIN: {
+ type: "v3",
+ value: new Vector3(-0.5, -0.5, -0.5)
+ },
+ AABB_CLIP_MAX: {
+ type: "v3",
+ value: new Vector3(0.5, 0.5, 0.5)
+ },
+ inverseModelViewMatrix: {
+ type: "m4",
+ value: new Matrix4()
+ },
+ textureAtlas: {
+ type: "t",
+ value: new Texture()
+ },
+ textureAtlasMask: {
+ type: "t",
+ value: new Texture()
+ },
+ maxProject: {
+ type: "i",
+ value: 0
+ },
+ interpolationEnabled: {
+ type: "b",
+ value: true
+ },
+ flipVolume: {
+ type: "v3",
+ value: new Vector3(1.0, 1.0, 1.0)
+ },
+ volumeScale: {
+ type: "v3",
+ value: new Vector3(1.0, 1.0, 1.0)
+ },
+ textureRes: {
+ type: "v2",
+ value: new Vector2(1.0, 1.0)
+ }
+ };
+ };
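Note: a minimal sketch of how the slice shader exports above could be wired into a three.js ShaderMaterial. The deep import path and the example uniform values are assumptions for illustration only and are not part of this package version.

import { Mesh, PlaneGeometry, ShaderMaterial } from "three";
import {
  sliceShaderUniforms,
  sliceVertexShaderSrc,
  sliceFragmentShaderSrc,
} from "@aics/vole-core/es/constants/volumeSliceShader.js"; // assumed deep-import path

// Build the default uniform set, then point it at real data.
const uniforms = sliceShaderUniforms();
uniforms.SLICES.value = 50;           // total z-slices packed into the atlas
uniforms.Z_SLICE.value = 25;          // which slice to display
uniforms.ATLAS_DIMS.value.set(8, 7);  // atlas tiles per row / column
// uniforms.textureAtlas.value would be replaced with the fused channel atlas texture.

const sliceMaterial = new ShaderMaterial({
  uniforms,
  vertexShader: sliceVertexShaderSrc,
  fragmentShader: sliceFragmentShaderSrc,
  transparent: true,
});

// A unit quad is enough; the fragment shader samples the atlas by UV.
const sliceMesh = new Mesh(new PlaneGeometry(1, 1), sliceMaterial);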
package/es/index.js ADDED
@@ -0,0 +1,21 @@
1
+ import { RENDERMODE_PATHTRACE, RENDERMODE_RAYMARCH, View3d } from "./View3d.js";
2
+ import Volume from "./Volume.js";
3
+ import VolumeDrawable from "./VolumeDrawable.js";
4
+ import Channel from "./Channel.js";
5
+ import VolumeMaker from "./VolumeMaker.js";
6
+ import VolumeCache from "./VolumeCache.js";
7
+ import RequestQueue from "./utils/RequestQueue.js";
8
+ import SubscribableRequestQueue from "./utils/SubscribableRequestQueue.js";
9
+ import Histogram from "./Histogram.js";
10
+ import { Lut, remapControlPoints } from "./Lut.js";
11
+ import { ViewportCorner } from "./types.js";
12
+ import { VolumeFileFormat, createVolumeLoader, PrefetchDirection } from "./loaders/index.js";
13
+ import { LoadSpec } from "./loaders/IVolumeLoader.js";
14
+ import { OMEZarrLoader } from "./loaders/OmeZarrLoader.js";
15
+ import { JsonImageInfoLoader } from "./loaders/JsonImageInfoLoader.js";
16
+ import { RawArrayLoader } from "./loaders/RawArrayLoader.js";
17
+ import { TiffLoader } from "./loaders/TiffLoader.js";
18
+ import VolumeLoaderContext from "./workers/VolumeLoaderContext.js";
19
+ import { VolumeLoadError, VolumeLoadErrorType } from "./loaders/VolumeLoadError.js";
20
+ import { Light, AREA_LIGHT, SKY_LIGHT } from "./Light.js";
21
+ export { Histogram, Lut, remapControlPoints, View3d, Volume, VolumeDrawable, LoadSpec, VolumeMaker, VolumeCache, RequestQueue, SubscribableRequestQueue, PrefetchDirection, OMEZarrLoader, JsonImageInfoLoader, RawArrayLoader, TiffLoader, VolumeLoaderContext, VolumeLoadError, VolumeLoadErrorType, VolumeFileFormat, createVolumeLoader, Channel, Light, ViewportCorner, AREA_LIGHT, RENDERMODE_PATHTRACE, RENDERMODE_RAYMARCH, SKY_LIGHT };
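Note: this entry module only re-exports the public API. A consumer-side import might look like the following sketch, assuming the package's module entry point resolves to this index.js; every name shown is re-exported above.

// Hypothetical consumer import of the public API surface.
import {
  View3d,
  Volume,
  LoadSpec,
  VolumeCache,
  createVolumeLoader,
  RENDERMODE_RAYMARCH,
  VolumeLoadError,
} from "@aics/vole-core";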
package/es/loaders/IVolumeLoader.js ADDED
@@ -0,0 +1,131 @@
+ import { Box3, Vector3 } from "three";
+ import { Box3, Vector3 } from "three";
+ import Volume from "../Volume.js";
+ import { CImageInfo } from "../ImageInfo.js";
+ import { buildDefaultMetadata } from "./VolumeLoaderUtils.js";
+ export class LoadSpec {
+ time = 0;
+ /** The max size of a volume atlas that may be produced by a load. Used to pick the appropriate multiscale level. */
+
+ /** An optional bias added to the scale level index after the optimal level is picked based on `maxAtlasEdge`. */
+
+ /**
+ * The max scale level to load. Even when this is specified, the loader may pick a *lower* scale level based on
+ * limits imposed by `scaleLevelBias` and `maxAtlasEdge` (or their defaults if unspecified).
+ */
+
+ /** Subregion of volume to load. If not specified, the entire volume is loaded. Specify as floats between 0-1. */
+ subregion = new Box3(new Vector3(0, 0, 0), new Vector3(1, 1, 1));
+ /** Treat multiscaleLevel literally and don't use other constraints to change it.
+ * By default we will try to load the best level based on the maxAtlasEdge and scaleLevelBias,
+ * so this is false.
+ */
+ useExplicitLevel = false;
+ }
+ export function loadSpecToString(spec) {
+ const {
+ min,
+ max
+ } = spec.subregion;
+ return `${spec.multiscaleLevel}:${spec.time}:x(${min.x},${max.x}):y(${min.y},${max.y}):z(${min.z},${max.z})`;
+ }
+
+ /**
+ * @callback PerChannelCallback
+ * @param {string} imageurl
+ * @param {Volume} volume
+ * @param {number} channelindex
+ */
+
+ /**
+ * @callback RawChannelDataCallback - allow lists of channel indices and data arrays to be passed to the callback
+ * @param {number[]} channelIndex - The indices of the channels that were loaded
+ * @param {NumberType[]} dtype - The data type of the data arrays
+ * @param {TypedArray<NumberType>[]} data - The raw data for each channel
+ * @param {[number, number][]} ranges - The min and max values for each channel in their original range
+ * @param {[number, number]} atlasDims - The dimensions of the atlas, if the data is in an atlas format
+ */
+
+ /**
+ * Loads volume data from a source specified by a `LoadSpec`.
+ *
+ * Loaders may keep state for reuse between volume creation and volume loading, and should be kept alive until volume
+ * loading is complete. (See `createVolume`)
+ */
+
+ /** Abstract class which allows loaders to accept and return types that are easier to transfer to/from a worker. */
+ export class ThreadableVolumeLoader {
+ /** Unchanged from `IVolumeLoader`. See that interface for details. */
+
+ /**
+ * Creates an `ImageInfo` object from a `LoadSpec`, which may be passed to the `Volume` constructor to create an
+ * empty volume that can accept data loaded with the given `LoadSpec`.
+ *
+ * Also returns a new `LoadSpec` that may have been modified from the input `LoadSpec` to reflect the constraints or
+ * abilities of the loader. This new `LoadSpec` should be used when constructing the `Volume`, _not_ the original.
+ */
+
+ /**
+ * Begins loading per-channel data for the volume specified by `imageInfo` and `loadSpec`.
+ *
+ * This function accepts two required callbacks. The first, `onUpdateVolumeMetadata`, should be called at most once
+ * to modify the `Volume`'s `imageInfo` and/or `loadSpec` properties based on changes made by this load. Actual
+ * loaded channel data is passed to `onData` as it is loaded.
+ *
+ * Depending on the loader, the array passed to `onData` may be in simple 3d dimension order or reflect a 2d atlas.
+ * If the latter, the dimensions of the atlas are passed as the third argument to `onData`.
+ *
+ * The returned promise should resolve when all data has been loaded, or reject if any error occurs while loading.
+ */
+
+ setPrefetchPriority(_directions) {
+ // no-op by default
+ }
+ syncMultichannelLoading(_sync) {
+ // default behavior is async, to update channels as they arrive, depending on each
+ // loader's implementation details.
+ }
+ updateFetchOptions(_options) {
+ // no-op by default
+ }
+ async createVolume(loadSpec, onChannelLoaded) {
+ const {
+ imageInfo,
+ loadSpec: adjustedLoadSpec
+ } = await this.createImageInfo(loadSpec);
+ const vol = new Volume(imageInfo, adjustedLoadSpec, this);
+ vol.channelLoadCallback = onChannelLoaded;
+ vol.imageMetadata = buildDefaultMetadata(imageInfo);
+ return vol;
+ }
+ async loadVolumeData(volume, loadSpecOverride, onChannelLoaded) {
+ const onUpdateMetadata = (imageInfo, loadSpec) => {
+ if (imageInfo) {
+ volume.imageInfo = new CImageInfo(imageInfo);
+ volume.updateDimensions();
+ }
+ volume.loadSpec = {
+ ...loadSpec,
+ ...spec
+ };
+ };
+ const onChannelData = (channelIndices, dtypes, dataArrays, ranges, atlasDims) => {
+ for (let i = 0; i < channelIndices.length; i++) {
+ const channelIndex = channelIndices[i];
+ const dtype = dtypes[i];
+ const data = dataArrays[i];
+ const range = ranges[i];
+ if (atlasDims) {
+ volume.setChannelDataFromAtlas(channelIndex, data, atlasDims[0], atlasDims[1], range, dtype);
+ } else {
+ volume.setChannelDataFromVolume(channelIndex, data, range, dtype);
+ }
+ onChannelLoaded?.(volume, channelIndex);
+ }
+ };
+ const spec = {
+ ...volume.loadSpec,
+ ...loadSpecOverride
+ };
+ return this.loadRawChannelData(volume.imageInfo.imageInfo, spec, onUpdateMetadata, onChannelData);
+ }
+ }
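Note: `createVolume` and `loadVolumeData` give every loader a two-step lifecycle: first build an empty Volume from the source metadata, then stream channel data into it. The sketch below drives that flow for any ThreadableVolumeLoader subclass; the loader construction and the package entry point used for the LoadSpec import are assumptions for illustration.

import { Box3, Vector3 } from "three";
import { LoadSpec } from "@aics/vole-core"; // assumed entry point for the re-exported LoadSpec

// `loader` is any ThreadableVolumeLoader subclass, e.g. one produced by createVolumeLoader.
async function loadTopHalf(loader) {
  const spec = new LoadSpec();
  spec.time = 0;
  // Subregion is expressed in normalized [0, 1] coordinates, per the LoadSpec docs above.
  spec.subregion = new Box3(new Vector3(0, 0, 0.5), new Vector3(1, 1, 1));

  // Step 1: build an empty Volume sized from the loader's metadata.
  const volume = await loader.createVolume(spec);

  // Step 2: stream channel data into it; the callback fires as each channel arrives.
  await loader.loadVolumeData(volume, {}, (vol, channelIndex) => {
    console.log(`channel ${channelIndex} loaded`);
  });
  return volume;
}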