rayzee 5.4.3 → 5.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "rayzee",
3
- "version": "5.4.3",
3
+ "version": "5.6.0",
4
4
  "type": "module",
5
5
  "description": "Real-time WebGPU path tracing engine built on Three.js",
6
6
  "main": "dist/rayzee.umd.js",
@@ -1,11 +1,25 @@
1
1
  import { Fn, wgslFn, vec4, float, int, uint, ivec2, uvec2, uniform, If, max,
2
- textureLoad, textureStore, workgroupArray, workgroupBarrier, localId, workgroupId,
3
- attributeArray } from 'three/tsl';
2
+ textureLoad, textureStore, workgroupBarrier, localId, workgroupId,
3
+ attributeArray, atomicAdd, atomicStore, atomicLoad, Loop } from 'three/tsl';
4
4
  import { RenderTarget, TextureNode, StorageTexture, ReadbackBuffer } from 'three/webgpu';
5
5
  import { FloatType, RGBAFormat, NearestFilter } from 'three';
6
6
  import { RenderStage, StageExecutionMode } from '../Pipeline/RenderStage.js';
7
7
  import { luminance } from '../TSL/Common.js';
8
8
 
9
+ // ── Histogram constants ────────────────────────────────────
10
+ const NUM_BINS = 256;
11
+ const MIN_LOG_LUM = - 8.0; // ln(~0.00034) — very dark
12
+ const MAX_LOG_LUM = 6.0; // ln(~403) — bright specular
13
+ const LOG_LUM_RANGE = MAX_LOG_LUM - MIN_LOG_LUM; // 14 nats ≈ 20 stops
14
+ const BIN_WIDTH = LOG_LUM_RANGE / NUM_BINS;
15
+ const WEIGHT_SCALE = 10000; // float → uint quantisation for metering weights
16
+
17
+ // ── Metering ────────────────────────────────────────────────
18
+ // Centre-weighted Gaussian is the only mode — spot and uniform
19
+ // are unnecessary given the percentile clipping already handles
20
+ // extreme highlights/shadows. The centerWeight uniform controls
21
+ // the Gaussian falloff steepness.
22
+
9
23
  // ── wgslFn helpers ──────────────────────────────────────────
10
24
 
11
25
  /**
@@ -45,31 +59,30 @@ const adaptExposure = /*@__PURE__*/ wgslFn( `
45
59
  ` );
46
60
 
47
61
  /**
48
- * WebGPU Auto-Exposure Stage (Fully Compute Shader)
62
+ * WebGPU Auto-Exposure Stage — Histogram-Based with Centre-Weighted Metering
49
63
  *
50
64
  * GPU-based automatic exposure control with human eye-like adaptation.
51
- * Uses hierarchical luminance reduction and asymmetric temporal smoothing.
65
+ * Uses histogram-based luminance analysis with percentile clipping
66
+ * and centre-weighted spatial metering for robust exposure estimation.
52
67
  *
53
68
  * Algorithm:
54
- * 1. Downsample (compute): full res → 64×64 log-luminance
55
- * 2. Reduction (compute): parallel reduction 64×64 1×1 via shared memory
56
- * Single workgroup of 256 threads, each loads 16 texels.
57
- * Computes geometric mean: exp(Σlog(L) / N)
58
- * 3. Adaptation (compute): temporal smoothing with prev exposure; writes
59
- * vec4(exposure, luminance, targetExposure, 1) into a 1-element storage buffer.
60
- * 4. Async readback via `renderer.getArrayBufferAsync(attr, ReadbackBuffer)`:
61
- * the ReadbackBuffer pools its staging GPUBuffer across frames, avoiding
62
- * per-frame allocation churn. Apply to renderer.toneMappingExposure.
63
- *
64
- * WebGPU advantage: async readback (no GPU pipeline stall).
65
- * 1-frame delay is imperceptible for slowly-changing exposure.
69
+ * 1. Downsample (compute): full res → 64×64 log-luminance grid
70
+ * 2. Histogram (compute): build 256-bin weighted histogram from the 64×64
71
+ * grid. Single workgroup of 256 threads; each loads 16 texels, applies
72
+ * centre-weighted Gaussian, and scatters via atomicAdd into a storage buffer.
73
+ * 3. Analyze (compute): single thread reads the histogram, computes CDF,
74
+ * extracts percentile-clipped weighted mean (ignoring bottom/top
75
+ * extremes), and writes the geometric mean to a 1×1 storage texture.
76
+ * 4. Adaptation (compute): temporal smoothing with prev exposure; writes
77
+ * vec4(exposure, luminance, targetExposure, 1) into a 1-element buffer.
78
+ * 5. Async readback via `renderer.getArrayBufferAsync(attr, ReadbackBuffer)`.
66
79
  *
67
80
  * Execution: ALWAYS
68
81
  *
69
82
  * Events listened:
70
83
  * pipeline:reset — reset temporal history
71
84
  * autoexposure:toggle — enable/disable
72
- * autoexposure:updateParameters — update key value, speeds, bounds
85
+ * autoexposure:updateParameters — update key value, speeds, bounds, percentiles
73
86
  *
74
87
  * Textures published: (none — publishes state, not textures)
75
88
  * Textures read: edgeFiltering:output > asvgf:output > pathtracer:color
@@ -101,6 +114,12 @@ export class AutoExposure extends RenderStage {
101
114
  this.isFirstFrameU = uniform( 1.0 ); // 1.0 = true
102
115
  this.previousExposureU = uniform( options.initialExposure ?? 1.0 );
103
116
 
117
+ // ── Histogram & metering uniforms ────────────────
118
+
119
+ this.lowPercentileU = uniform( options.lowPercentile ?? 0.10 );
120
+ this.highPercentileU = uniform( options.highPercentile ?? 0.90 );
121
+ this.centerWeightU = uniform( options.centerWeight ?? 8.0 );
122
+
104
123
  // ── Input resolution uniforms (for downsample compute) ──
105
124
 
106
125
  this.inputResW = uniform( 1 );
@@ -149,14 +168,14 @@ export class AutoExposure extends RenderStage {
149
168
  this._downsampleStorageTex.minFilter = NearestFilter;
150
169
  this._downsampleStorageTex.magFilter = NearestFilter;
151
170
 
152
- // 1×1 StorageTexture for compute reduction output
171
+ // 1×1 StorageTexture for histogram analysis output
153
172
  this._reductionStorageTex = new StorageTexture( 1, 1 );
154
173
  this._reductionStorageTex.type = FloatType;
155
174
  this._reductionStorageTex.format = RGBAFormat;
156
175
  this._reductionStorageTex.minFilter = NearestFilter;
157
176
  this._reductionStorageTex.magFilter = NearestFilter;
158
177
 
159
- // 1×1 RenderTarget — readable copy of reduction output (cross-dispatch reads
178
+ // 1×1 RenderTarget — readable copy of analysis output (cross-dispatch reads
160
179
  // from StorageTexture return zeros — must copy to RenderTarget first)
161
180
  this._reductionReadTarget = new RenderTarget( 1, 1, rtOpts );
162
181
 
@@ -167,6 +186,9 @@ export class AutoExposure extends RenderStage {
167
186
  this._readbackBuffer = new ReadbackBuffer( 16 );
168
187
  this._readbackBuffer.name = 'AutoExposureAdaptation';
169
188
 
189
+ // ── Histogram storage buffer (atomic uint, 256 bins) ─────
190
+ this._histogramBuffer = attributeArray( NUM_BINS, 'uint' ).toAtomic();
191
+
170
192
  }
171
193
 
172
194
  // ──────────────────────────────────────────────────
@@ -176,7 +198,8 @@ export class AutoExposure extends RenderStage {
176
198
  _buildCompute() {
177
199
 
178
200
  this._buildDownsampleCompute();
179
- this._buildReductionCompute();
201
+ this._buildHistogramCompute();
202
+ this._buildHistogramAnalyzeCompute();
180
203
  this._buildAdaptationCompute();
181
204
 
182
205
  }
@@ -257,94 +280,146 @@ export class AutoExposure extends RenderStage {
257
280
  }
258
281
 
259
282
  /**
260
- * Reduction: parallel compute 64×64 → 1×1
283
+ * Histogram Build (compute): 64×64 downsample → 256-bin weighted histogram
261
284
  *
262
- * Single workgroup of 256 threads. Each thread loads 16 texels
263
- * from the 64×64 downsample texture, then participates in a
264
- * shared-memory parallel reduction.
285
+ * Single workgroup of 256 threads. Each thread processes 16 texels from
286
+ * the downsample grid, applies spatial metering weight, and atomically
287
+ * scatters into the histogram storage buffer.
265
288
  *
266
- * Output: StorageTexture(1×1) = vec4(geometricMean, count, avgLogLum, 1)
289
+ * Phase 1: Clear all 256 bins (one per thread)
290
+ * Phase 2: Build histogram with metering-weighted atomic scatter
267
291
  */
268
- _buildReductionCompute() {
292
+ _buildHistogramCompute() {
269
293
 
270
294
  const downsampleTex = this._downsampleTarget.texture;
271
- const outputTex = this._reductionStorageTex;
295
+ const histogram = this._histogramBuffer;
296
+ const centerWeight = this.centerWeightU;
272
297
 
273
298
  const WGSIZE = 256;
274
299
  const TEXELS_PER_THREAD = 16; // 4096 / 256
275
300
  const TEX_SIZE = 64;
276
301
 
277
- const sharedLogSum = workgroupArray( 'float', WGSIZE );
278
- const sharedCount = workgroupArray( 'float', WGSIZE );
279
-
280
- const reductionFn = Fn( () => {
302
+ const computeFn = Fn( () => {
281
303
 
282
304
  const tid = localId.x;
283
305
 
284
- // ── Phase 1: Each thread loads and sums 16 texels ──
285
-
286
- const threadLogSum = float( 0.0 ).toVar();
287
- const threadCount = float( 0.0 ).toVar();
306
+ // ── Phase 1: Clear histogram ──────────────────
307
+ atomicStore( histogram.element( tid ), uint( 0 ) );
308
+ workgroupBarrier();
288
309
 
289
- for ( let i = 0; i < TEXELS_PER_THREAD; i ++ ) {
310
+ // ── Phase 2: Build histogram ──────────────────
311
+ for ( let t = 0; t < TEXELS_PER_THREAD; t ++ ) {
290
312
 
291
- const linearIdx = tid.mul( TEXELS_PER_THREAD ).add( i );
313
+ const linearIdx = tid.mul( TEXELS_PER_THREAD ).add( t );
292
314
  const px = linearIdx.mod( TEX_SIZE );
293
315
  const py = linearIdx.div( TEX_SIZE );
316
+
294
317
  const data = textureLoad( downsampleTex, ivec2( int( px ), int( py ) ) );
318
+ const logLumSum = data.x;
319
+ const validCount = data.y;
320
+
321
+ If( validCount.greaterThan( 0.0 ), () => {
295
322
 
296
- // data.x = logLumSum, data.y = validCount from downsample
297
- threadLogSum.addAssign( data.x );
298
- threadCount.addAssign( data.y );
323
+ // Per-cell average log-luminance (natural log, matches downsample output)
324
+ const avgLogLum = logLumSum.div( validCount );
325
+
326
+ // Map to histogram bin [0, NUM_BINS-1]
327
+ const normalized = avgLogLum.sub( float( MIN_LOG_LUM ) ).div( float( LOG_LUM_RANGE ) );
328
+ const bin = uint( normalized.mul( float( NUM_BINS ) ).floor().clamp( 0.0, float( NUM_BINS - 1 ) ) );
329
+
330
+ // ── Centre-weighted metering ──────────
331
+ const uvx = float( px ).add( 0.5 ).div( float( TEX_SIZE ) );
332
+ const uvy = float( py ).add( 0.5 ).div( float( TEX_SIZE ) );
333
+ const dx = uvx.sub( 0.5 );
334
+ const dy = uvy.sub( 0.5 );
335
+ const dist2 = dx.mul( dx ).add( dy.mul( dy ) );
336
+
337
+ // Gaussian falloff: 1.0 at centre, ~0.02 at corners
338
+ const weight = dist2.mul( centerWeight ).negate().exp();
339
+
340
+ const weightUint = uint( weight.mul( float( WEIGHT_SCALE ) ) );
341
+ atomicAdd( histogram.element( bin ), weightUint );
342
+
343
+ } );
299
344
 
300
345
  }
301
346
 
302
- sharedLogSum.element( tid ).assign( threadLogSum );
303
- sharedCount.element( tid ).assign( threadCount );
347
+ } );
304
348
 
305
- // ── Phase 2: Parallel reduction (8 steps) ──────────
306
- // JS for-loop unrolls at shader build time
349
+ this._histogramComputeNode = computeFn().compute( [ 1, 1, 1 ], [ WGSIZE, 1, 1 ] );
307
350
 
308
- for ( let stride = WGSIZE / 2; stride >= 1; stride = Math.floor( stride / 2 ) ) {
351
+ }
309
352
 
310
- workgroupBarrier();
353
+ /**
354
+ * Histogram Analysis (compute): extract percentile-clipped geometric mean
355
+ *
356
+ * Single thread. Reads the 256-bin histogram, computes the CDF, clips
357
+ * the bottom and top percentiles, and computes the weighted geometric
358
+ * mean of luminance within the accepted range.
359
+ *
360
+ * Output: StorageTexture(1×1) = vec4(geometricMean, totalCount, avgLogLum, 1)
361
+ */
362
+ _buildHistogramAnalyzeCompute() {
311
363
 
312
- If( tid.lessThan( uint( stride ) ), () => {
364
+ const histogram = this._histogramBuffer;
365
+ const outputTex = this._reductionStorageTex;
366
+ const lowPercentile = this.lowPercentileU;
367
+ const highPercentile = this.highPercentileU;
313
368
 
314
- sharedLogSum.element( tid ).addAssign(
315
- sharedLogSum.element( tid.add( uint( stride ) ) )
316
- );
317
- sharedCount.element( tid ).addAssign(
318
- sharedCount.element( tid.add( uint( stride ) ) )
319
- );
369
+ const computeFn = Fn( () => {
320
370
 
321
- } );
371
+ // ── Pass 1: compute total weight ──────────────
372
+ const totalWeight = float( 0.0 ).toVar();
322
373
 
323
- }
374
+ Loop( NUM_BINS, ( { i } ) => {
324
375
 
325
- // ── Phase 3: Thread 0 writes final result ──────────
376
+ totalWeight.addAssign( float( atomicLoad( histogram.element( i ) ) ) );
326
377
 
327
- workgroupBarrier();
378
+ } );
379
+
380
+ // Percentile thresholds (in quantised weight units)
381
+ const lowThreshold = totalWeight.mul( lowPercentile );
382
+ const highThreshold = totalWeight.mul( highPercentile );
383
+
384
+ // ── Pass 2: percentile-clipped weighted mean ──
385
+ const cumWeight = float( 0.0 ).toVar();
386
+ const logLumAccum = float( 0.0 ).toVar();
387
+ const validWeight = float( 0.0 ).toVar();
388
+ const prevCum = float( 0.0 ).toVar();
389
+
390
+ Loop( NUM_BINS, ( { i } ) => {
391
+
392
+ const binWeight = float( atomicLoad( histogram.element( i ) ) );
393
+ prevCum.assign( cumWeight );
394
+ cumWeight.addAssign( binWeight );
328
395
 
329
- If( tid.equal( uint( 0 ) ), () => {
396
+ // Include bin if it overlaps the [lowThreshold, highThreshold] range
397
+ If( prevCum.lessThan( highThreshold ).and( cumWeight.greaterThan( lowThreshold ) ), () => {
330
398
 
331
- const totalLogSum = sharedLogSum.element( uint( 0 ) );
332
- const totalCount = sharedCount.element( uint( 0 ) );
333
- const safeCount = max( totalCount, float( 1.0 ) );
334
- const avgLogLum = totalLogSum.div( safeCount );
335
- const geometricMean = avgLogLum.exp();
399
+ // Bin centre in log-luminance space
400
+ const binCenter = float( MIN_LOG_LUM ).add(
401
+ float( i ).add( 0.5 ).mul( float( BIN_WIDTH ) )
402
+ );
403
+ logLumAccum.addAssign( binCenter.mul( binWeight ) );
404
+ validWeight.addAssign( binWeight );
336
405
 
337
- textureStore(
338
- outputTex,
339
- uvec2( uint( 0 ), uint( 0 ) ),
340
- vec4( geometricMean, totalCount, avgLogLum, 1.0 )
341
- ).toWriteOnly();
406
+ } );
342
407
 
343
408
  } );
344
409
 
410
+ const safeWeight = max( validWeight, float( 1.0 ) );
411
+ const avgLogLum = logLumAccum.div( safeWeight );
412
+ const geometricMean = avgLogLum.exp();
413
+
414
+ textureStore(
415
+ outputTex,
416
+ uvec2( uint( 0 ), uint( 0 ) ),
417
+ vec4( geometricMean, totalWeight, avgLogLum, 1.0 )
418
+ ).toWriteOnly();
419
+
345
420
  } );
346
421
 
347
- this._reductionComputeNode = reductionFn().compute( 1, [ WGSIZE, 1, 1 ] );
422
+ this._histogramAnalyzeNode = computeFn().compute( 1, [ 1, 1, 1 ] );
348
423
 
349
424
  }
350
425
 
@@ -352,7 +427,7 @@ export class AutoExposure extends RenderStage {
352
427
  * Adaptation (compute): temporal smoothing
353
428
  *
354
429
  * Single-thread compute dispatch [1, 1, 1], workgroup [1, 1, 1].
355
- * Reads geometric mean from reduction RenderTarget, applies asymmetric
430
+ * Reads geometric mean from analysis RenderTarget, applies asymmetric
356
431
  * temporal smoothing using the previous-exposure uniform, and writes
357
432
  * vec4(exposure, luminance, targetExposure, 1) into a 1-element storage
358
433
  * buffer which the CPU reads via getArrayBufferAsync + ReadbackBuffer.
@@ -372,7 +447,7 @@ export class AutoExposure extends RenderStage {
372
447
 
373
448
  const computeFn = Fn( () => {
374
449
 
375
- // Read geometric mean from reduction result (1×1 RenderTarget)
450
+ // Read geometric mean from histogram analysis result (1×1 RenderTarget)
376
451
  const geoMean = textureLoad( reductionTex, ivec2( int( 0 ), int( 0 ) ) ).x;
377
452
 
378
453
  const result = adaptExposure(
@@ -404,16 +479,7 @@ export class AutoExposure extends RenderStage {
404
479
 
405
480
  } );
406
481
 
407
- this.on( 'autoexposure:updateParameters', ( data ) => {
408
-
409
- if ( ! data ) return;
410
- if ( data.keyValue !== undefined ) this.keyValueU.value = data.keyValue;
411
- if ( data.minExposure !== undefined ) this.minExposureU.value = data.minExposure;
412
- if ( data.maxExposure !== undefined ) this.maxExposureU.value = data.maxExposure;
413
- if ( data.adaptSpeedBright !== undefined ) this.adaptSpeedBrightU.value = data.adaptSpeedBright;
414
- if ( data.adaptSpeedDark !== undefined ) this.adaptSpeedDarkU.value = data.adaptSpeedDark;
415
-
416
- } );
482
+ this.on( 'autoexposure:updateParameters', ( data ) => data && this.updateParameters( data ) );
417
483
 
418
484
  }
419
485
 
@@ -450,12 +516,16 @@ export class AutoExposure extends RenderStage {
450
516
  this.renderer.compute( this._downsampleComputeNode );
451
517
  this.renderer.copyTextureToTexture( this._downsampleStorageTex, this._downsampleTarget.texture );
452
518
 
453
- // ── Pass 2: Reduction 64×64 → 1×1 (compute) ────────
519
+ // ── Pass 2: Histogram build (compute) ───────────────
520
+
521
+ this.renderer.compute( this._histogramComputeNode );
522
+
523
+ // ── Pass 3: Histogram analysis → 1×1 result ─────────
454
524
 
455
- this.renderer.compute( this._reductionComputeNode );
525
+ this.renderer.compute( this._histogramAnalyzeNode );
456
526
  this.renderer.copyTextureToTexture( this._reductionStorageTex, this._reductionReadTarget.texture );
457
527
 
458
- // ── Pass 3: Temporal adaptation (compute) ───────────
528
+ // ── Pass 4: Temporal adaptation (compute) ───────────
459
529
 
460
530
  this._reductionReadTexNode.value = this._reductionReadTarget.texture;
461
531
  this.renderer.compute( this._adaptationComputeNode );
@@ -578,7 +648,7 @@ export class AutoExposure extends RenderStage {
578
648
 
579
649
  setSize( /* width, height */ ) {
580
650
 
581
- // Downsample and reduction targets are fixed-size (64×64 → 1×1)
651
+ // Downsample and histogram targets are fixed-size (64×64 → 256 bins → 1×1)
582
652
  // No resizing needed — the downsample compute shader reads input
583
653
  // resolution from uniforms and computes block sizes dynamically.
584
654
 
@@ -611,13 +681,17 @@ export class AutoExposure extends RenderStage {
611
681
  if ( params.maxExposure !== undefined ) this.maxExposureU.value = params.maxExposure;
612
682
  if ( params.adaptSpeedBright !== undefined ) this.adaptSpeedBrightU.value = params.adaptSpeedBright;
613
683
  if ( params.adaptSpeedDark !== undefined ) this.adaptSpeedDarkU.value = params.adaptSpeedDark;
684
+ if ( params.lowPercentile !== undefined ) this.lowPercentileU.value = params.lowPercentile;
685
+ if ( params.highPercentile !== undefined ) this.highPercentileU.value = params.highPercentile;
686
+ if ( params.centerWeight !== undefined ) this.centerWeightU.value = params.centerWeight;
614
687
 
615
688
  }
616
689
 
617
690
  dispose() {
618
691
 
619
692
  this._downsampleComputeNode?.dispose();
620
- this._reductionComputeNode?.dispose();
693
+ this._histogramComputeNode?.dispose();
694
+ this._histogramAnalyzeNode?.dispose();
621
695
  this._adaptationComputeNode?.dispose();
622
696
  this._downsampleTarget?.dispose();
623
697
  this._downsampleStorageTex?.dispose();
@@ -2,9 +2,9 @@ import { storage } from 'three/tsl';
2
2
  import { StorageInstancedBufferAttribute } from 'three/webgpu';
3
3
  import {
4
4
  NearestFilter, Vector2, Matrix4,
5
- TextureLoader, RepeatWrapping, FloatType
5
+ TextureLoader, RepeatWrapping
6
6
  } from 'three';
7
- import { blueNoiseTextureNode } from '../TSL/Random.js';
7
+ import { stbnScalarTextureNode, stbnVec2TextureNode } from '../TSL/Random.js';
8
8
 
9
9
  // Pipeline system
10
10
  import { RenderStage, StageExecutionMode } from '../Pipeline/RenderStage.js';
@@ -26,8 +26,9 @@ import { LightSerializer } from '../Processor/LightSerializer';
26
26
  // Constants
27
27
  import { ENGINE_DEFAULTS as DEFAULT_STATE } from '../EngineDefaults.js';
28
28
 
29
- // Blue noise (loaded at runtime from CDN — not inlined to keep bundle small)
30
- const blueNoiseImage = 'https://assets.rayzee.atulmourya.com/noise/simple_bluenoise.png';
29
+ // STBN atlases - original source: https://github.com/NVIDIA-RTX/STBN/blob/main/Assets/STBN.zip
30
+ const stbnScalarAtlas = 'https://assets.rayzee.atulmourya.com/noise/stbn_scalar_atlas.png';
31
+ const stbnVec2Atlas = 'https://assets.rayzee.atulmourya.com/noise/stbn_vec2_atlas.png';
31
32
 
32
33
  /**
33
34
  * Data layout constants
@@ -190,8 +191,9 @@ export class PathTracer extends RenderStage {
190
191
  this.spotLightsData = null;
191
192
  this.areaLightsData = null;
192
193
 
193
- // Blue noise
194
- this.blueNoiseTexture = null;
194
+ // STBN noise textures
195
+ this.stbnScalarTexture = null;
196
+ this.stbnVec2Texture = null;
195
197
 
196
198
  // Packed light buffer — [lightBVH nodes (4 vec4s each) | emissive triangles (2 vec4s each)]
197
199
  // emissiveVec4Offset uniform tracks the vec4-count offset where emissive data starts.
@@ -376,25 +378,38 @@ export class PathTracer extends RenderStage {
376
378
  }
377
379
 
378
380
  /**
379
- * Setup blue noise texture
381
+ * Load STBN (Spatiotemporal Blue Noise) atlas textures.
382
+ * Each atlas is 1024×1024: 8×8 grid of 128×128 tiles, 64 temporal slices.
380
383
  */
381
384
  setupBlueNoise() {
382
385
 
383
386
  const loader = new TextureLoader();
384
387
  loader.setCrossOrigin( 'anonymous' );
385
- loader.load( blueNoiseImage, ( texture ) => {
386
388
 
387
- texture.minFilter = NearestFilter;
388
- texture.magFilter = NearestFilter;
389
- texture.wrapS = RepeatWrapping;
390
- texture.wrapT = RepeatWrapping;
391
- texture.type = FloatType;
392
- texture.generateMipmaps = false;
389
+ const configure = ( tex ) => {
393
390
 
394
- this.blueNoiseTexture = texture;
395
- blueNoiseTextureNode.value = texture;
391
+ tex.minFilter = NearestFilter;
392
+ tex.magFilter = NearestFilter;
393
+ tex.wrapS = RepeatWrapping;
394
+ tex.wrapT = RepeatWrapping;
395
+ tex.generateMipmaps = false;
396
+ return tex;
396
397
 
397
- console.log( `PathTracer: Blue noise loaded ${texture.image.width}x${texture.image.height}` );
398
+ };
399
+
400
+ loader.load( stbnScalarAtlas, ( tex ) => {
401
+
402
+ this.stbnScalarTexture = configure( tex );
403
+ stbnScalarTextureNode.value = tex;
404
+ console.log( `PathTracer: STBN scalar atlas loaded ${tex.image.width}x${tex.image.height}` );
405
+
406
+ } );
407
+
408
+ loader.load( stbnVec2Atlas, ( tex ) => {
409
+
410
+ this.stbnVec2Texture = configure( tex );
411
+ stbnVec2TextureNode.value = tex;
412
+ console.log( `PathTracer: STBN vec2 atlas loaded ${tex.image.width}x${tex.image.height}` );
398
413
 
399
414
  } );
400
415
 
@@ -1521,9 +1536,9 @@ export class PathTracer extends RenderStage {
1521
1536
 
1522
1537
  setBlueNoiseTexture( tex ) {
1523
1538
 
1524
- this.blueNoiseTexture = tex;
1525
- // Update the shared Random.js texture node so TSL shader graph uses the real texture
1526
- if ( tex ) blueNoiseTextureNode.value = tex;
1539
+ // Legacy API — sets the scalar STBN atlas texture
1540
+ this.stbnScalarTexture = tex;
1541
+ if ( tex ) stbnScalarTextureNode.value = tex;
1527
1542
 
1528
1543
  }
1529
1544
 
@@ -1676,7 +1691,8 @@ export class PathTracer extends RenderStage {
1676
1691
  this.storageTextures?.dispose();
1677
1692
 
1678
1693
  // Dispose textures
1679
- this.blueNoiseTexture?.dispose();
1694
+ this.stbnScalarTexture?.dispose();
1695
+ this.stbnVec2Texture?.dispose();
1680
1696
  this.placeholderTexture?.dispose();
1681
1697
 
1682
1698
  // Clear data references
@@ -28,12 +28,11 @@ import {
28
28
  texture,
29
29
  } from 'three/tsl';
30
30
 
31
- import { Ray, ShadowMaterial, HitInfo, DirectionSample, MaterialCache } from './Struct.js';
31
+ import { Ray, ShadowMaterial, HitInfo } from './Struct.js';
32
32
  import { PI, TWO_PI, EPSILON, REC709_LUMINANCE_COEFFICIENTS, powerHeuristic, getShadowMaterial, getDatafromStorageBuffer } from './Common.js';
33
33
  import { fresnelSchlickFloat } from './Fresnel.js';
34
34
  import { iorToFresnel0 } from './Fresnel.js';
35
35
  import {
36
- DirectionalLight, AreaLight, PointLight, SpotLight,
37
36
  sampleCone, intersectAreaLight,
38
37
  } from './LightsCore.js';
39
38
  import { calculateBeerLawAbsorption, calculateShadowTransmittance } from './MaterialTransmission.js';