shaderpad 1.0.0-beta.40 → 1.0.0-beta.42

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. package/README.md +323 -41
  2. package/dist/chunk-5CBGNOA3.mjs +10 -0
  3. package/dist/chunk-5CBGNOA3.mjs.map +1 -0
  4. package/dist/chunk-JRSBIGBN.mjs +7 -0
  5. package/dist/chunk-JRSBIGBN.mjs.map +1 -0
  6. package/dist/index.d.mts +7 -30
  7. package/dist/index.d.ts +7 -30
  8. package/dist/index.js +3 -4
  9. package/dist/index.js.map +1 -1
  10. package/dist/index.mjs +1 -1
  11. package/dist/plugins/face.d.mts +1 -3
  12. package/dist/plugins/face.d.ts +1 -3
  13. package/dist/plugins/face.js +53 -76
  14. package/dist/plugins/face.js.map +1 -1
  15. package/dist/plugins/face.mjs +51 -79
  16. package/dist/plugins/face.mjs.map +1 -1
  17. package/dist/plugins/hands.d.mts +1 -3
  18. package/dist/plugins/hands.d.ts +1 -3
  19. package/dist/plugins/hands.js +20 -16
  20. package/dist/plugins/hands.js.map +1 -1
  21. package/dist/plugins/hands.mjs +14 -15
  22. package/dist/plugins/hands.mjs.map +1 -1
  23. package/dist/plugins/helpers.js +1 -1
  24. package/dist/plugins/helpers.js.map +1 -1
  25. package/dist/plugins/helpers.mjs +1 -1
  26. package/dist/plugins/helpers.mjs.map +1 -1
  27. package/dist/plugins/mediapipe-common.d.mts +18 -0
  28. package/dist/plugins/mediapipe-common.d.ts +18 -0
  29. package/dist/plugins/mediapipe-common.js +7 -0
  30. package/dist/plugins/mediapipe-common.js.map +1 -0
  31. package/dist/plugins/mediapipe-common.mjs +2 -0
  32. package/dist/plugins/mediapipe-common.mjs.map +1 -0
  33. package/dist/plugins/pose.d.mts +1 -3
  34. package/dist/plugins/pose.d.ts +1 -3
  35. package/dist/plugins/pose.js +53 -49
  36. package/dist/plugins/pose.js.map +1 -1
  37. package/dist/plugins/pose.mjs +30 -30
  38. package/dist/plugins/pose.mjs.map +1 -1
  39. package/dist/plugins/save.d.mts +1 -1
  40. package/dist/plugins/save.d.ts +1 -1
  41. package/dist/plugins/save.js +1 -1
  42. package/dist/plugins/save.js.map +1 -1
  43. package/dist/plugins/save.mjs.map +1 -1
  44. package/dist/plugins/segmenter.d.mts +1 -3
  45. package/dist/plugins/segmenter.d.ts +1 -3
  46. package/dist/plugins/segmenter.js +17 -13
  47. package/dist/plugins/segmenter.js.map +1 -1
  48. package/dist/plugins/segmenter.mjs +11 -11
  49. package/dist/plugins/segmenter.mjs.map +1 -1
  50. package/package.json +1 -1
  51. package/dist/chunk-6C6DVCZI.mjs +0 -11
  52. package/dist/chunk-6C6DVCZI.mjs.map +0 -1
package/README.md CHANGED
@@ -280,7 +280,54 @@ shader.destroy(); // Clean up resources.
  ### Properties

  - `canvas` (HTMLCanvasElement): The canvas element used for rendering
- - `onResize?: (width: number, height: number) => void`: Callback fired when the canvas is resized
+
+ ### Event Listeners
+
+ #### `on(event, callback)`
+
+ Register a callback for a lifecycle event.
+
+ ```typescript
+ shader.on('resize', (width, height) => {
+   console.log(`Canvas resized to ${width}x${height}`);
+ });
+ ```
+
+ **Parameters:**
+
+ - `event` (string): The event name
+ - `callback` (Function): The callback function
+
+ #### `off(event, callback)`
+
+ Remove a previously registered callback.
+
+ **Parameters:**
+
+ - `event` (string): The event name
+ - `callback` (Function): The callback function to remove
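+
+ For example, keep a reference to the handler so the same function can be passed to both `on` and `off` (a minimal sketch; `handleResize` is a hypothetical name):
+
+ ```typescript
+ const handleResize = (width: number, height: number) => {
+   console.log(`Canvas resized to ${width}x${height}`);
+ };
+
+ shader.on('resize', handleResize);
+ // Later, once resize notifications are no longer needed:
+ shader.off('resize', handleResize);
+ ```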
+
+ #### Available Events
+
+ | Event | Callback Arguments | Description |
+ | ------------------- | --------------------------------------- | ------------------------------------------------ |
+ | `init` | none | Fired after initialization is complete |
+ | `resize` | `(width: number, height: number)` | Fired when the canvas element is resized |
+ | `updateResolution` | `(width: number, height: number)` | Fired when the drawing buffer resolution changes |
+ | `play` | none | Fired when the render loop starts |
+ | `pause` | none | Fired when the render loop is paused |
+ | `reset` | none | Fired when the shader is reset |
+ | `destroy` | none | Fired when the shader is destroyed |
+ | `beforeStep` | `(time: number, options?: StepOptions)` | Fired before each render step |
+ | `afterStep` | `(time: number, options?: StepOptions)` | Fired after each render step |
+ | `beforeDraw` | `(options?: StepOptions)` | Fired before each draw call |
+ | `afterDraw` | `(options?: StepOptions)` | Fired after each draw call |
+ | `initializeTexture` | `(name, source, options?)` | Fired after a texture is initialized |
+ | `initializeUniform` | `(name, type, value, options?)` | Fired after a uniform is initialized |
+ | `updateTextures` | `(updates, options?)` | Fired after textures are updated |
+ | `updateUniforms` | `(updates, options?)` | Fired after uniforms are updated |
+
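+ For example, `beforeStep` can drive per-frame uniform updates (a minimal sketch; `u_myValue` is a hypothetical uniform):
+
+ ```typescript
+ shader.on('beforeStep', (time) => {
+   // Keep a custom uniform in sync with the elapsed time.
+   shader.updateUniforms({ u_myValue: Math.sin(time) });
+ });
+ ```
+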
+ Plugins may emit additional namespaced events (e.g., `face:ready`, `pose:result`).

  ## Options

@@ -293,10 +340,10 @@ The `canvas` option allows you to pass in an existing canvas element. If not pro
  ```typescript
  const canvas = document.createElement('canvas');
  const shader = new ShaderPad(fragmentShaderSrc, { canvas });
- shader.onResize = (width, height) => {
+ shader.on('resize', (width, height) => {
    canvas.width = width;
    canvas.height = height;
- };
+ });
  ```

  ### history
@@ -352,22 +399,22 @@ const shader = new ShaderPad(fragmentShaderSrc, { debug: true });

  ### plugins

- Plugins add additional functionality. Imported from separate paths to keep bundle sizes small.
+ ShaderPad adds additional functionality through plugins, which are imported from separate paths to keep bundle sizes small.

  #### helpers

- Convenience functions and constants. See [helpers.glsl](./src/plugins/helpers.glsl).
+ The `helpers` plugin provides convenience functions and constants. See [helpers.glsl](./src/plugins/helpers.glsl) for the implementation.

  ```typescript
  import helpers from 'shaderpad/plugins/helpers';
  const shader = new ShaderPad(fragmentShaderSrc, { plugins: [helpers()] });
  ```

- **Note:** Automatically injects `u_resolution`. Don't declare it yourself.
+ **Note:** The `helpers` plugin automatically injects the `u_resolution` uniform into your shader. Do not declare it yourself.

  #### save

- Adds `.save()` method to save the current frame as PNG.
+ The `save` plugin adds a `.save()` method to the shader that saves the current frame to a PNG file. It works on desktop and mobile.

  ```typescript
  import save, { WithSave } from 'shaderpad/plugins/save';
@@ -377,7 +424,7 @@ shader.save('filename', 'Optional mobile share text');

  #### face

- Uses [MediaPipe Face Landmarker](https://ai.google.dev/edge/mediapipe/solutions/vision/face_landmarker) to detect faces.
+ The `face` plugin uses [MediaPipe Face Landmarker](https://ai.google.dev/edge/mediapipe/solutions/vision/face_landmarker) to detect faces in video or image textures.

  ```typescript
  import face from 'shaderpad/plugins/face';
@@ -386,28 +433,89 @@ const shader = new ShaderPad(fragmentShaderSrc, {
  });
  ```

- **Options:** `onReady?: () => void`, `onResults?: (results: FaceLandmarkerResult) => void`
+ **Options:**

- **Uniforms:** `u_maxFaces` (int), `u_nFaces` (int), `u_faceLandmarksTex` (sampler2D), `u_faceMask` (sampler2D)
+ - `maxFaces?: number` - Maximum faces to detect (default: 1)
+ - `history?: number` - Frames of history to store for landmarks and mask textures

- **Helper functions:** All region functions return `vec2(confidence, faceIndex)`. faceIndex is 0-indexed (-1 = no face).
+ **Events:**

- - `faceLandmark(int faceIndex, int landmarkIndex) -> vec4` - Returns `vec4(x, y, z, visibility)`
- - `leftEyebrowAt(vec2 pos) -> vec2`, `rightEyebrowAt(vec2 pos) -> vec2`
- - `leftEyeAt(vec2 pos) -> vec2`, `rightEyeAt(vec2 pos) -> vec2`
- - `lipsAt(vec2 pos) -> vec2`, `outerMouthAt(vec2 pos) -> vec2`, `innerMouthAt(vec2 pos) -> vec2`
- - `faceOvalAt(vec2 pos) -> vec2`, `faceAt(vec2 pos) -> vec2`
- - `eyeAt(vec2 pos) -> vec2`, `eyebrowAt(vec2 pos) -> vec2`
+ | Event | Callback Arguments | Description |
+ | ------------- | -------------------------------- | ---------------------------------------- |
+ | `face:ready` | none | Fired when the detection model is loaded |
+ | `face:result` | `(result: FaceLandmarkerResult)` | Fired with detection results each frame |
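+
+ For example, plugin events use the same `on` API as lifecycle events (a minimal sketch; `faceLandmarks` follows the `FaceLandmarkerResult` shape from `@mediapipe/tasks-vision`):
+
+ ```typescript
+ shader.on('face:ready', () => console.log('Face model loaded'));
+ shader.on('face:result', (result) => {
+   // One landmark array per detected face.
+   console.log(`Detected ${result.faceLandmarks.length} face(s)`);
+ });
+ ```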

- **Convenience functions:** `inFace(vec2 pos) -> float`, `inEye(vec2 pos) -> float`, `inEyebrow(vec2 pos) -> float`, `inOuterMouth(vec2 pos) -> float`, `inInnerMouth(vec2 pos) -> float`, `inLips(vec2 pos) -> float`
+ **Uniforms:**

- **Constants:** `FACE_LANDMARK_L_EYE_CENTER`, `FACE_LANDMARK_R_EYE_CENTER`, `FACE_LANDMARK_NOSE_TIP`, `FACE_LANDMARK_FACE_CENTER`, `FACE_LANDMARK_MOUTH_CENTER`
+ | Uniform | Type | Description |
+ | -------------------- | ----------------------------- | --------------------------------------------------------------------------- |
+ | `u_maxFaces` | int | Maximum number of faces to detect |
+ | `u_nFaces` | int | Current number of detected faces |
+ | `u_faceLandmarksTex` | sampler2D (or sampler2DArray) | Raw landmark data texture (use `faceLandmark()` to access) |
+ | `u_faceMask` | sampler2D (or sampler2DArray) | Face mask texture (R: region type, G: confidence, B: normalized face index) |

- **Note:** Requires `@mediapipe/tasks-vision` as a peer dependency. Adds two custom landmarks (`FACE_CENTER`, `MOUTH_CENTER`), bringing total to 480.
+ **Helper functions:**
+
+ All region functions return `vec2(confidence, faceIndex)`. faceIndex is 0-indexed (-1 = no face). When `history` is enabled, all functions accept an optional `int framesAgo` parameter.
+
+ - `faceLandmark(int faceIndex, int landmarkIndex) -> vec4` - Returns landmark data as `vec4(x, y, z, visibility)`. Use `vec2(faceLandmark(...))` to get just the screen position.
+ - `leftEyebrowAt(vec2 pos) -> vec2` - Returns `vec2(1.0, faceIndex)` if position is in left eyebrow, `vec2(0.0, -1.0)` otherwise.
+ - `rightEyebrowAt(vec2 pos) -> vec2` - Returns `vec2(1.0, faceIndex)` if position is in right eyebrow, `vec2(0.0, -1.0)` otherwise.
+ - `leftEyeAt(vec2 pos) -> vec2` - Returns `vec2(1.0, faceIndex)` if position is in left eye, `vec2(0.0, -1.0)` otherwise.
+ - `rightEyeAt(vec2 pos) -> vec2` - Returns `vec2(1.0, faceIndex)` if position is in right eye, `vec2(0.0, -1.0)` otherwise.
+ - `lipsAt(vec2 pos) -> vec2` - Returns `vec2(1.0, faceIndex)` if position is in lips, `vec2(0.0, -1.0)` otherwise.
+ - `outerMouthAt(vec2 pos) -> vec2` - Returns `vec2(1.0, faceIndex)` if position is in outer mouth (lips + inner mouth), `vec2(0.0, -1.0)` otherwise.
+ - `innerMouthAt(vec2 pos) -> vec2` - Returns `vec2(1.0, faceIndex)` if position is in inner mouth region, `vec2(0.0, -1.0)` otherwise.
+ - `faceOvalAt(vec2 pos) -> vec2` - Returns `vec2(1.0, faceIndex)` if position is in face oval contour, `vec2(0.0, -1.0)` otherwise.
+ - `faceAt(vec2 pos) -> vec2` - Returns `vec2(1.0, faceIndex)` if position is in face mesh or oval contour, `vec2(0.0, -1.0)` otherwise.
+ - `eyeAt(vec2 pos) -> vec2` - Returns `vec2(1.0, faceIndex)` if position is in either eye, `vec2(0.0, -1.0)` otherwise.
+ - `eyebrowAt(vec2 pos) -> vec2` - Returns `vec2(1.0, faceIndex)` if position is in either eyebrow, `vec2(0.0, -1.0)` otherwise.
+
+ **Convenience functions** (return `1.0` if true, `0.0` if false):
+
+ - `inFace(vec2 pos) -> float` - Returns `1.0` if position is in face mesh, `0.0` otherwise.
+ - `inEye(vec2 pos) -> float` - Returns `1.0` if position is in either eye, `0.0` otherwise.
+ - `inEyebrow(vec2 pos) -> float` - Returns `1.0` if position is in either eyebrow, `0.0` otherwise.
+ - `inOuterMouth(vec2 pos) -> float` - Returns `1.0` if position is in outer mouth (lips + inner mouth), `0.0` otherwise.
+ - `inInnerMouth(vec2 pos) -> float` - Returns `1.0` if position is in inner mouth, `0.0` otherwise.
+ - `inLips(vec2 pos) -> float` - Returns `1.0` if position is in lips, `0.0` otherwise.
+
+ **Landmark Constants:**
+
+ - `FACE_LANDMARK_L_EYE_CENTER` - Left eye center landmark index
+ - `FACE_LANDMARK_R_EYE_CENTER` - Right eye center landmark index
+ - `FACE_LANDMARK_NOSE_TIP` - Nose tip landmark index
+ - `FACE_LANDMARK_FACE_CENTER` - Face center landmark index (custom, calculated from all landmarks)
+ - `FACE_LANDMARK_MOUTH_CENTER` - Mouth center landmark index (custom, calculated from inner mouth landmarks)
+
+ **Example usage:**
+
+ ```glsl
+ // Get a specific landmark position.
+ vec2 nosePos = vec2(faceLandmark(0, FACE_LANDMARK_NOSE_TIP));
+
+ // Use in* convenience functions for simple boolean checks.
+ float eyeMask = inEye(v_uv);
+
+ // Use faceLandmark or *At functions when you need to check a specific face index.
+ vec2 leftEye = leftEyeAt(v_uv);
+ for (int i = 0; i < u_nFaces; ++i) {
+   vec4 leftEyeCenter = faceLandmark(i, FACE_LANDMARK_L_EYE_CENTER);
+   vec4 rightEyeCenter = faceLandmark(i, FACE_LANDMARK_R_EYE_CENTER);
+   if (leftEye.x > 0.0 && int(leftEye.y) == i) {
+     // Position is inside the left eye of face i.
+   }
+   // ...
+ }
+ ```
+
+ [Landmark indices are documented here.](https://ai.google.dev/edge/mediapipe/solutions/vision/face_landmarker#face_landmarker_model) This library adds two custom landmarks: `FACE_CENTER` and `MOUTH_CENTER`. This brings the total landmark count to 480.
+
+ **Note:** The face plugin requires `@mediapipe/tasks-vision` as a peer dependency.

  #### pose

- Uses [MediaPipe Pose Landmarker](https://ai.google.dev/edge/mediapipe/solutions/vision/pose_landmarker). Each pose contributes 39 landmarks (33 standard + 6 custom).
+ The `pose` plugin uses [MediaPipe Pose Landmarker](https://ai.google.dev/edge/mediapipe/solutions/vision/pose_landmarker) to expose a flat array of 2D landmarks. Each pose contributes 39 landmarks (33 standard + 6 custom), enumerated below.

  ```typescript
  import pose from 'shaderpad/plugins/pose';
@@ -416,23 +524,99 @@ const shader = new ShaderPad(fragmentShaderSrc, {
  });
  ```

- **Options:** `onReady?: () => void`, `onResults?: (results: PoseLandmarkerResult) => void`
+ **Options:**
+
+ - `maxPoses?: number` - Maximum poses to detect (default: 1)
+ - `history?: number` - Frames of history to store for landmarks and mask textures

- **Uniforms:** `u_maxPoses` (int), `u_nPoses` (int), `u_poseLandmarksTex` (sampler2D), `u_poseMask` (sampler2D)
+ **Events:**

- **Helper functions:**
+ | Event | Callback Arguments | Description |
+ | ------------- | -------------------------------- | ---------------------------------------- |
+ | `pose:ready` | none | Fired when the detection model is loaded |
+ | `pose:result` | `(result: PoseLandmarkerResult)` | Fired with detection results each frame |

- - `poseLandmark(int poseIndex, int landmarkIndex) -> vec4` - Returns `vec4(x, y, z, visibility)`
- - `poseAt(vec2 pos) -> vec2` - Returns `vec2(confidence, poseIndex)`
- - `inPose(vec2 pos) -> float`
+ **Uniforms:**

- **Constants:** `POSE_LANDMARK_LEFT_EYE`, `POSE_LANDMARK_RIGHT_EYE`, `POSE_LANDMARK_LEFT_SHOULDER`, `POSE_LANDMARK_RIGHT_SHOULDER`, `POSE_LANDMARK_LEFT_ELBOW`, `POSE_LANDMARK_RIGHT_ELBOW`, `POSE_LANDMARK_LEFT_HIP`, `POSE_LANDMARK_RIGHT_HIP`, `POSE_LANDMARK_LEFT_KNEE`, `POSE_LANDMARK_RIGHT_KNEE`, `POSE_LANDMARK_BODY_CENTER`, `POSE_LANDMARK_LEFT_HAND_CENTER`, `POSE_LANDMARK_RIGHT_HAND_CENTER`, `POSE_LANDMARK_LEFT_FOOT_CENTER`, `POSE_LANDMARK_RIGHT_FOOT_CENTER`, `POSE_LANDMARK_TORSO_CENTER`
+ | Uniform | Type | Description |
+ | -------------------- | ----------------------------- | ----------------------------------------------------------------------------- |
+ | `u_maxPoses` | int | Maximum number of poses to track |
+ | `u_nPoses` | int | Current number of detected poses |
+ | `u_poseLandmarksTex` | sampler2D (or sampler2DArray) | Raw landmark data texture (RGBA: x, y, z, visibility) |
+ | `u_poseMask` | sampler2D (or sampler2DArray) | Pose mask texture (R: body detected, G: confidence, B: normalized pose index) |

- **Note:** Requires `@mediapipe/tasks-vision`. Use `PoseLandmarker.POSE_CONNECTIONS` for skeleton connections. [Landmark indices](https://ai.google.dev/edge/mediapipe/solutions/vision/pose_landmarker#pose_landmarker_model)
+ **Helper functions:**
+
+ When `history` is enabled, all functions accept an optional `int framesAgo` parameter.
+
+ - `poseLandmark(int poseIndex, int landmarkIndex) -> vec4` - Returns landmark data as `vec4(x, y, z, visibility)`. Use `vec2(poseLandmark(...))` to get just the screen position.
+ - `poseAt(vec2 pos) -> vec2` - Returns `vec2(confidence, poseIndex)`. poseIndex is 0-indexed (-1 = no pose), confidence is the segmentation confidence.
+ - `inPose(vec2 pos) -> float` - Returns `1.0` if position is in any pose, `0.0` otherwise.
+
+ **Constants:**
+
+ - `POSE_LANDMARK_LEFT_EYE` - Left eye landmark index (2)
+ - `POSE_LANDMARK_RIGHT_EYE` - Right eye landmark index (5)
+ - `POSE_LANDMARK_LEFT_SHOULDER` - Left shoulder landmark index (11)
+ - `POSE_LANDMARK_RIGHT_SHOULDER` - Right shoulder landmark index (12)
+ - `POSE_LANDMARK_LEFT_ELBOW` - Left elbow landmark index (13)
+ - `POSE_LANDMARK_RIGHT_ELBOW` - Right elbow landmark index (14)
+ - `POSE_LANDMARK_LEFT_HIP` - Left hip landmark index (23)
+ - `POSE_LANDMARK_RIGHT_HIP` - Right hip landmark index (24)
+ - `POSE_LANDMARK_LEFT_KNEE` - Left knee landmark index (25)
+ - `POSE_LANDMARK_RIGHT_KNEE` - Right knee landmark index (26)
+ - `POSE_LANDMARK_BODY_CENTER` - Body center landmark index (33, custom, calculated from all landmarks)
+ - `POSE_LANDMARK_LEFT_HAND_CENTER` - Left hand center landmark index (34, custom, calculated from pinky, thumb, wrist, index)
+ - `POSE_LANDMARK_RIGHT_HAND_CENTER` - Right hand center landmark index (35, custom, calculated from pinky, thumb, wrist, index)
+ - `POSE_LANDMARK_LEFT_FOOT_CENTER` - Left foot center landmark index (36, custom, calculated from ankle, heel, foot index)
+ - `POSE_LANDMARK_RIGHT_FOOT_CENTER` - Right foot center landmark index (37, custom, calculated from ankle, heel, foot index)
+ - `POSE_LANDMARK_TORSO_CENTER` - Torso center landmark index (38, custom, calculated from shoulders and hips)
+
+ **Note:** For connecting pose landmarks (e.g., drawing skeleton lines), `PoseLandmarker.POSE_CONNECTIONS` from `@mediapipe/tasks-vision` provides an array of `{ start, end }` pairs that define which landmarks should be connected, as sketched below.
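+
+ A minimal CPU-side sketch of consuming those pairs (assuming only the documented `{ start, end }` shape):
+
+ ```typescript
+ import { PoseLandmarker } from '@mediapipe/tasks-vision';
+
+ // Each connection pairs two landmark indices that form one skeleton segment.
+ const segments = PoseLandmarker.POSE_CONNECTIONS.map(({ start, end }) => [start, end]);
+ console.log(`Skeleton has ${segments.length} segments`);
+ ```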
576
+
577
+ Use `poseLandmark(int poseIndex, int landmarkIndex)` in GLSL to retrieve a specific point. Landmark indices are:
578
+
579
+ | Index | Landmark | Index | Landmark |
580
+ | ----- | ----------------- | ----- | -------------------------- |
581
+ | 0 | nose | 20 | right index |
582
+ | 1 | left eye (inner) | 21 | left thumb |
583
+ | 2 | left eye | 22 | right thumb |
584
+ | 3 | left eye (outer) | 23 | left hip |
585
+ | 4 | right eye (inner) | 24 | right hip |
586
+ | 5 | right eye | 25 | left knee |
587
+ | 6 | right eye (outer) | 26 | right knee |
588
+ | 7 | left ear | 27 | left ankle |
589
+ | 8 | right ear | 28 | right ankle |
590
+ | 9 | mouth (left) | 29 | left heel |
591
+ | 10 | mouth (right) | 30 | right heel |
592
+ | 11 | left shoulder | 31 | left foot index |
593
+ | 12 | right shoulder | 32 | right foot index |
594
+ | 13 | left elbow | 33 | body center (custom) |
595
+ | 14 | right elbow | 34 | left hand center (custom) |
596
+ | 15 | left wrist | 35 | right hand center (custom) |
597
+ | 16 | right wrist | 36 | left foot center (custom) |
598
+ | 17 | left pinky | 37 | right foot center (custom) |
599
+ | 18 | right pinky | 38 | torso center (custom) |
600
+ | 19 | left index | | |
601
+
602
+ [Source](https://ai.google.dev/edge/mediapipe/solutions/vision/pose_landmarker#pose_landmarker_model)
603
+
604
+ [Landmark indices are documented here.](https://ai.google.dev/edge/mediapipe/solutions/vision/pose_landmarker#pose_landmarker_model) This library adds six custom landmarks: `BODY_CENTER`, `LEFT_HAND_CENTER`, `RIGHT_HAND_CENTER`, `LEFT_FOOT_CENTER`, `RIGHT_FOOT_CENTER`, and `TORSO_CENTER`. This brings the total landmark count to 39.
605
+
606
+ A minimal fragment shader loop looks like:
607
+
608
+ ```glsl
609
+ for (int i = 0; i < u_maxPoses; ++i) {
610
+ if (i >= u_nPoses) break;
611
+ vec2 leftHip = vec2(poseLandmark(i, POSE_LANDMARK_LEFT_HIP));
612
+ vec2 rightHip = vec2(poseLandmark(i, POSE_LANDMARK_RIGHT_HIP));
613
+ // …
614
+ }
615
+ ```
432
616
 
433
617
  #### hands
434
618
 
435
- Uses [MediaPipe Hand Landmarker](https://ai.google.dev/edge/mediapipe/solutions/vision/hand_landmarker). Each hand contributes 22 landmarks.
619
+ The `hands` plugin uses [MediaPipe Hand Landmarker](https://ai.google.dev/edge/mediapipe/solutions/vision/hand_landmarker) to expose a flat array of 2D landmarks. Each hand contributes 22 landmarks, enumerated below.
436
620
 
437
621
  ```typescript
438
622
  import hands from 'shaderpad/plugins/hands';
@@ -441,37 +625,135 @@ const shader = new ShaderPad(fragmentShaderSrc, {
441
625
  });
442
626
  ```
443
627
 
444
- **Options:** `onReady?: () => void`, `onResults?: (results: HandLandmarkerResult) => void`
628
+ **Options:**
629
+
630
+ - `maxHands?: number` - Maximum hands to detect (default: 2)
631
+ - `history?: number` - Frames of history to store for landmarks texture
632
+
633
+ **Events:**
445
634
 
446
- **Uniforms:** `u_maxHands` (int), `u_nHands` (int), `u_handLandmarksTex` (sampler2D)
635
+ | Event | Callback Arguments | Description |
636
+ | -------------- | -------------------------------- | ---------------------------------------- |
637
+ | `hands:ready` | none | Fired when the detection model is loaded |
638
+ | `hands:result` | `(result: HandLandmarkerResult)` | Fired with detection results each frame |
639
+
640
+ **Uniforms:**
641
+
642
+ | Uniform | Type | Description |
643
+ | -------------------- | ----------------------------- | ----------------------------------------------------- |
644
+ | `u_maxHands` | int | Maximum number of hands to track |
645
+ | `u_nHands` | int | Current number of detected hands |
646
+ | `u_handLandmarksTex` | sampler2D (or sampler2DArray) | Raw landmark data texture (RGBA: x, y, z, handedness) |
447
647
 
448
648
  **Helper functions:**
449
649
 
450
- - `handLandmark(int handIndex, int landmarkIndex) -> vec4` - Returns `vec4(x, y, z, handedness)`. Handedness: 0.0 = left, 1.0 = right.
451
- - `isRightHand(int handIndex) -> float`, `isLeftHand(int handIndex) -> float`
650
+ When `history` is enabled, all functions accept an optional `int framesAgo` parameter.
651
+
652
+ - `handLandmark(int handIndex, int landmarkIndex) -> vec4` - Returns landmark data as `vec4(x, y, z, handedness)`. Use `vec2(handLandmark(...))` to get just the screen position. Handedness: 0.0 = left hand, 1.0 = right hand.
653
+ - `isRightHand(int handIndex) -> float` - Returns 1.0 if the hand is a right hand, 0.0 if left.
654
+ - `isLeftHand(int handIndex) -> float` - Returns 1.0 if the hand is a left hand, 0.0 if right.
655
+
656
+ **Landmark Indices:**
657
+
658
+ | Index | Landmark | Index | Landmark |
659
+ | ----- | ----------------- | ----- | ----------------- |
660
+ | 0 | WRIST | 11 | MIDDLE_FINGER_DIP |
661
+ | 1 | THUMB_CMC | 12 | MIDDLE_FINGER_TIP |
662
+ | 2 | THUMB_MCP | 13 | RING_FINGER_MCP |
663
+ | 3 | THUMB_IP | 14 | RING_FINGER_PIP |
664
+ | 4 | THUMB_TIP | 15 | RING_FINGER_DIP |
665
+ | 5 | INDEX_FINGER_MCP | 16 | RING_FINGER_TIP |
666
+ | 6 | INDEX_FINGER_PIP | 17 | PINKY_MCP |
667
+ | 7 | INDEX_FINGER_DIP | 18 | PINKY_PIP |
668
+ | 8 | INDEX_FINGER_TIP | 19 | PINKY_DIP |
669
+ | 9 | MIDDLE_FINGER_MCP | 20 | PINKY_TIP |
670
+ | 10 | MIDDLE_FINGER_PIP | 21 | HAND_CENTER |
452
671
 
453
- **Note:** Requires `@mediapipe/tasks-vision`. [Landmark indices](https://ai.google.dev/edge/mediapipe/solutions/vision/hand_landmarker#models)
672
+ [Source](https://ai.google.dev/edge/mediapipe/solutions/vision/hand_landmarker#models)
673
+
674
+ A minimal fragment shader loop looks like:
675
+
676
+ ```glsl
677
+ #define WRIST 0
678
+ #define THUMB_TIP 4
679
+ #define INDEX_TIP 8
680
+ #define HAND_CENTER 21
681
+
682
+ for (int i = 0; i < u_maxHands; ++i) {
683
+ if (i >= u_nHands) break;
684
+ vec2 wrist = vec2(handLandmark(i, WRIST));
685
+ vec2 thumbTip = vec2(handLandmark(i, THUMB_TIP));
686
+ vec2 indexTip = vec2(handLandmark(i, INDEX_TIP));
687
+ vec2 handCenter = vec2(handLandmark(i, HAND_CENTER));
688
+
689
+ // Use handedness for coloring (0.0 = left/black, 1.0 = right/white).
690
+ vec3 handColor = vec3(isRightHand(i));
691
+ // …
692
+ }
693
+ ```
694
+
695
+ **Note:** The hands plugin requires `@mediapipe/tasks-vision` as a peer dependency.

  #### segmenter

- Uses [MediaPipe Image Segmenter](https://ai.google.dev/edge/mediapipe/solutions/vision/image_segmenter). Supports multi-category models. Defaults to [hair segmentation model](https://ai.google.dev/edge/mediapipe/solutions/vision/image_segmenter#hair-model).
+ The `segmenter` plugin uses [MediaPipe Image Segmenter](https://ai.google.dev/edge/mediapipe/solutions/vision/image_segmenter) to segment objects in video or image textures. It supports models with multiple categories (e.g., background, hair, chair, dog…). By default, it uses the [hair segmentation model](https://ai.google.dev/edge/mediapipe/solutions/vision/image_segmenter#hair-model).

  ```typescript
+ import ShaderPad from 'shaderpad';
  import segmenter from 'shaderpad/plugins/segmenter';
+
  const shader = new ShaderPad(fragmentShaderSrc, {
-   plugins: [segmenter({ textureName: 'u_webcam', options: { outputCategoryMask: true } })],
+   plugins: [
+     segmenter({
+       textureName: 'u_webcam',
+       options: {
+         modelPath:
+           'https://storage.googleapis.com/mediapipe-models/image_segmenter/selfie_multiclass_256x256/float32/latest/selfie_multiclass_256x256.tflite',
+         outputCategoryMask: true,
+       },
+     }),
+   ],
  });
  ```

- **Options:** `onReady?: () => void`, `onResults?: (results: ImageSegmenterResult) => void`
+ **Options:**
+
+ - `history?: number` - Frames of history to store for mask texture
+
+ **Events:**
+
+ | Event | Callback Arguments | Description |
+ | ------------------ | -------------------------------- | ------------------------------------------- |
+ | `segmenter:ready` | none | Fired when the segmentation model is loaded |
+ | `segmenter:result` | `(result: ImageSegmenterResult)` | Fired with segmentation results each frame |
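+
+ For example (a minimal sketch; `categoryMask` is an optional field on `ImageSegmenterResult` from `@mediapipe/tasks-vision`, present when `outputCategoryMask` is enabled):
+
+ ```typescript
+ shader.on('segmenter:result', (result) => {
+   // Inspect the category mask dimensions when it is available.
+   console.log(result.categoryMask?.width, result.categoryMask?.height);
+ });
+ ```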

- **Uniforms:** `u_segmentMask` (sampler2D), `u_numCategories` (int)
+ **Uniforms:**
+
+ | Uniform | Type | Description |
+ | ----------------- | ----------------------------- | ------------------------------------------------------------------------ |
+ | `u_segmentMask` | sampler2D (or sampler2DArray) | Segment mask texture (R: normalized category, G: confidence, B: unused) |
+ | `u_numCategories` | int | Number of segmentation categories (including background) |

  **Helper functions:**

- - `segmentAt(vec2 pos) -> vec2` - Returns `vec2(confidence, categoryIndex)`. categoryIndex is 0-indexed (-1 = background).
+ When `history` is enabled, all functions accept an optional `int framesAgo` parameter.
+
+ - `segmentAt(vec2 pos) -> vec2` - Returns `vec2(confidence, categoryIndex)`. categoryIndex is 0-indexed (-1 = background). confidence is the segmentation confidence (0-1).
+
+ **Example usage:**
+
+ ```glsl
+ vec2 segment = segmentAt(v_uv);
+ float confidence = segment.x; // Segmentation confidence
+ float category = segment.y; // Category index (0-indexed, -1 = background)
+
+ // Start from the source texture color (u_webcam from the setup above).
+ vec3 color = texture(u_webcam, v_uv).rgb;
+ if (category >= 0.0) {
+   // Apply effect based on confidence.
+   color = mix(color, vec3(1.0, 0.0, 1.0), confidence);
+ }
+ ```

- **Note:** Requires `@mediapipe/tasks-vision` as a peer dependency.
+ **Note:** The segmenter plugin requires `@mediapipe/tasks-vision` as a peer dependency.

  ## Contributing

package/dist/chunk-5CBGNOA3.mjs ADDED
@@ -0,0 +1,10 @@
+ var F=`#version 300 es
+ in vec2 aPosition;
+ out vec2 v_uv;
+ void main() {
+ v_uv = aPosition * 0.5 + 0.5;
+ gl_Position = vec4(aPosition, 0.0, 1.0);
+ }`,w=33.333333333333336,L=new Float32Array([-1,-1,1,-1,-1,1,-1,1,1,-1,1,1]),v=Symbol("u_history"),b=Symbol("__SHADERPAD_BUFFER");function A(a,e){if(!e?.length)return a;let t=a.split(`
+ `),s=t.findLastIndex(i=>{let r=i.trimStart();return r.startsWith("precision ")||r.startsWith("#version ")})+1;return t.splice(s,0,...e),t.join(`
+ `)}function y(a){return a instanceof WebGLTexture?{width:0,height:0}:a instanceof E?{width:a.canvas.width,height:a.canvas.height}:a instanceof HTMLVideoElement?{width:a.videoWidth,height:a.videoHeight}:a instanceof HTMLImageElement?{width:a.naturalWidth??a.width,height:a.naturalHeight??a.height}:{width:a.width,height:a.height}}function x(a){return typeof a=="symbol"?a.description??"":a}var E=class a{isInternalCanvas=!1;isTouchDevice=!1;gl;uniforms=new Map;textures=new Map;textureUnitPool;buffer=null;program=null;aPositionLocation=0;animationFrameId;resolutionObserver;resizeObserver;resizeTimeout=null;lastResizeTime=-1/0;eventListeners=new Map;frame=0;startTime=0;cursorPosition=[.5,.5];clickPosition=[.5,.5];isMouseDown=!1;canvas;hooks=new Map;historyDepth=0;textureOptions;debug;intermediateFbo=null;constructor(e,{canvas:t,plugins:s,history:i,debug:r,...o}={}){if(this.canvas=t||document.createElement("canvas"),!t){this.isInternalCanvas=!0;let f=this.canvas;f.style.position="fixed",f.style.inset="0",f.style.height="100dvh",f.style.width="100dvw",document.body.appendChild(f)}if(this.canvas instanceof OffscreenCanvas){let f=d=>{let g=Object.getOwnPropertyDescriptor(OffscreenCanvas.prototype,d);Object.defineProperty(this.canvas,d,{get:()=>g.get.call(this.canvas),set:R=>{g.set.call(this.canvas,R),this.updateResolution()},configurable:g.configurable,enumerable:g.enumerable})};f("width"),f("height")}let n=this.canvas.getContext("webgl2",{antialias:!1});if(!n)throw new Error("WebGL2 not supported. Please use a browser that supports WebGL2.");this.gl=n,this.textureUnitPool={free:[],next:0,max:n.getParameter(n.MAX_COMBINED_TEXTURE_IMAGE_UNITS)},this.textureOptions=o;let{internalFormat:h,type:c}=o;(c===n.FLOAT||c===n.HALF_FLOAT||h===n.RGBA16F||h===n.RGBA32F||h===n.R16F||h===n.R32F||h===n.RG16F||h===n.RG32F)&&!n.getExtension("EXT_color_buffer_float")&&(console.warn("EXT_color_buffer_float not supported, falling back to RGBA8"),delete this.textureOptions?.internalFormat,delete this.textureOptions?.format,delete this.textureOptions?.type),i&&(this.historyDepth=i),this.debug=r??(typeof process<"u"&&!1),this.animationFrameId=null,this.resolutionObserver=new MutationObserver(()=>this.updateResolution()),this.resizeObserver=new ResizeObserver(()=>this.throttledHandleResize());let m=[];s&&s.forEach(f=>f(this,{gl:n,canvas:this.canvas,injectGLSL:d=>{m.push(d)},emitHook:this.emitHook.bind(this)}));let u=this.gl.createProgram();if(!u)throw new Error("Failed to create WebGL program");this.program=u;let p=this.createShader(this.gl.VERTEX_SHADER,F),T=this.createShader(n.FRAGMENT_SHADER,A(e,m));if(n.attachShader(u,p),n.attachShader(u,T),n.linkProgram(u),n.deleteShader(p),n.deleteShader(T),!n.getProgramParameter(u,n.LINK_STATUS))throw console.error("Program link error:",n.getProgramInfoLog(u)),n.deleteProgram(u),new Error("Failed to link WebGL program");this.aPositionLocation=n.getAttribLocation(u,"aPosition"),this.buffer=n.createBuffer(),n.bindBuffer(n.ARRAY_BUFFER,this.buffer),n.bufferData(n.ARRAY_BUFFER,L,n.STATIC_DRAW),n.viewport(0,0,n.drawingBufferWidth,n.drawingBufferHeight),n.enableVertexAttribArray(this.aPositionLocation),n.vertexAttribPointer(this.aPositionLocation,2,n.FLOAT,!1,0,0),n.useProgram(u),this.canvas instanceof 
HTMLCanvasElement&&(this.resolutionObserver.observe(this.canvas,{attributes:!0,attributeFilter:["width","height"]}),this.resizeObserver.observe(this.canvas)),this.isInternalCanvas||this.updateResolution(),this.initializeUniform("u_cursor","float",this.cursorPosition),this.initializeUniform("u_click","float",[...this.clickPosition,this.isMouseDown?1:0]),this.initializeUniform("u_time","float",0),this.initializeUniform("u_frame","int",0),this._initializeTexture(b,this.canvas,{...this.textureOptions}),this.intermediateFbo=n.createFramebuffer(),this.historyDepth>0&&this._initializeTexture(v,this.canvas,{history:this.historyDepth,...this.textureOptions}),this.canvas instanceof HTMLCanvasElement&&this.addEventListeners(),this.emitHook("init")}emitHook(e,...t){this.hooks.get(e)?.forEach(s=>s.call(this,...t))}on(e,t){this.hooks.has(e)||this.hooks.set(e,[]),this.hooks.get(e).push(t)}off(e,t){let s=this.hooks.get(e);s&&s.splice(s.indexOf(t),1)}createShader(e,t){let s=this.gl.createShader(e);if(this.gl.shaderSource(s,t),this.gl.compileShader(s),!this.gl.getShaderParameter(s,this.gl.COMPILE_STATUS))throw console.error("Shader compilation failed:",t),console.error(this.gl.getShaderInfoLog(s)),this.gl.deleteShader(s),new Error("Shader compilation failed");return s}throttledHandleResize(){clearTimeout(this.resizeTimeout);let e=performance.now(),t=this.lastResizeTime+w-e;t<=0?(this.lastResizeTime=e,this.handleResize()):this.resizeTimeout=setTimeout(()=>this.throttledHandleResize(),t)}handleResize(){if(!(this.canvas instanceof HTMLCanvasElement))return;let e=window.devicePixelRatio||1,t=this.canvas.clientWidth*e,s=this.canvas.clientHeight*e;this.isInternalCanvas&&(this.canvas.width!==t||this.canvas.height!==s)&&(this.canvas.width=t,this.canvas.height=s),this.emitHook("resize",t,s)}addEventListeners(){let e=this.canvas,t=(i,r)=>{if(!this.uniforms.has("u_cursor"))return;let o=e.getBoundingClientRect();this.cursorPosition[0]=(i-o.left)/o.width,this.cursorPosition[1]=1-(r-o.top)/o.height,this.updateUniforms({u_cursor:this.cursorPosition})},s=(i,r,o)=>{if(this.uniforms.has("u_click")){if(this.isMouseDown=i,i){let n=e.getBoundingClientRect(),h=r,c=o;this.clickPosition[0]=(h-n.left)/n.width,this.clickPosition[1]=1-(c-n.top)/n.height}this.updateUniforms({u_click:[...this.clickPosition,this.isMouseDown?1:0]})}};this.eventListeners.set("mousemove",i=>{let r=i;this.isTouchDevice||t(r.clientX,r.clientY)}),this.eventListeners.set("mousedown",i=>{let r=i;this.isTouchDevice||r.button===0&&(this.isMouseDown=!0,s(!0,r.clientX,r.clientY))}),this.eventListeners.set("mouseup",i=>{let r=i;this.isTouchDevice||r.button===0&&s(!1)}),this.eventListeners.set("touchmove",i=>{let r=i;r.touches.length>0&&t(r.touches[0].clientX,r.touches[0].clientY)}),this.eventListeners.set("touchstart",i=>{let r=i;this.isTouchDevice=!0,r.touches.length>0&&(t(r.touches[0].clientX,r.touches[0].clientY),s(!0,r.touches[0].clientX,r.touches[0].clientY))}),this.eventListeners.set("touchend",i=>{i.touches.length===0&&s(!1)}),this.eventListeners.forEach((i,r)=>{e.addEventListener(r,i)})}updateResolution(){let e=[this.gl.drawingBufferWidth,this.gl.drawingBufferHeight];this.gl.viewport(0,0,...e),this.uniforms.has("u_resolution")?this.updateUniforms({u_resolution:e}):this.initializeUniform("u_resolution","float",e),this.resizeTexture(b,...e),this.historyDepth>0&&this.resizeTexture(v,...e),this.emitHook("updateResolution",...e)}resizeTexture(e,t,s){let 
i=this.textures.get(e);if(!i||i.width===t&&i.height===s)return;this.gl.deleteTexture(i.texture),i.width=t,i.height=s;let{texture:r}=this.createTexture(e,i);i.texture=r,i.history&&(i.history.writeIndex=0,this.clearHistoryTextureLayers(i))}reserveTextureUnit(e){let t=this.textures.get(e);if(t)return t.unitIndex;if(this.textureUnitPool.free.length>0)return this.textureUnitPool.free.pop();if(this.textureUnitPool.next>=this.textureUnitPool.max)throw new Error("Exceeded the available texture units for this device.");return this.textureUnitPool.next++}releaseTextureUnit(e){let t=this.textures.get(e);t&&this.textureUnitPool.free.push(t.unitIndex)}resolveTextureOptions(e){let{gl:t}=this,s=e?.type??t.UNSIGNED_BYTE;return{type:s,format:e?.format??t.RGBA,internalFormat:e?.internalFormat??(s===t.FLOAT?t.RGBA32F:s===t.HALF_FLOAT?t.RGBA16F:t.RGBA8),minFilter:e?.minFilter??t.LINEAR,magFilter:e?.magFilter??t.LINEAR,wrapS:e?.wrapS??t.CLAMP_TO_EDGE,wrapT:e?.wrapT??t.CLAMP_TO_EDGE,preserveY:e?.preserveY}}getPixelArray(e,t){return e===this.gl.FLOAT?new Float32Array(t):e===this.gl.HALF_FLOAT?new Uint16Array(t):new Uint8Array(t)}clearHistoryTextureLayers(e){if(!e.history)return;let t=this.gl,{type:s,format:i}=e.options,r=this.getPixelArray(s,e.width*e.height*4);t.activeTexture(t.TEXTURE0+e.unitIndex),t.bindTexture(t.TEXTURE_2D_ARRAY,e.texture);for(let o=0;o<e.history.depth;++o)t.texSubImage3D(t.TEXTURE_2D_ARRAY,0,0,0,o,e.width,e.height,1,i,s,r)}initializeUniform(e,t,s,i){let r=i?.arrayLength;if(this.uniforms.has(e))throw new Error(`${e} is already initialized.`);if(t!=="float"&&t!=="int")throw new Error(`Invalid uniform type: ${t}. Expected 'float' or 'int'.`);if(r&&!(Array.isArray(s)&&s.length===r))throw new Error(`${e} array length mismatch: must initialize with ${r} elements.`);let o=this.gl.getUniformLocation(this.program,e);if(!o&&r&&(o=this.gl.getUniformLocation(this.program,`${e}[0]`)),!o){this.log(`${e} not in shader. Skipping initialization.`);return}let n=r?s[0]:s,h=Array.isArray(n)?n.length:1;this.uniforms.set(e,{type:t,length:h,location:o,arrayLength:r});try{this.updateUniforms({[e]:s})}catch(c){throw this.uniforms.delete(e),c}this.emitHook("initializeUniform",...arguments)}log(...e){this.debug&&console.debug(...e)}updateUniforms(e,t){this.gl.useProgram(this.program),Object.entries(e).forEach(([s,i])=>{let r=this.uniforms.get(s);if(!r){this.log(`${s} not in shader. Skipping update.`);return}let o=`uniform${r.length}${r.type.charAt(0)}`;if(r.arrayLength){if(!Array.isArray(i))throw new Error(`${s} is an array, but the value passed to updateUniforms is not an array.`);let n=i.length;if(!n)return;if(n>r.arrayLength)throw new Error(`${s} received ${n} values, but maximum length is ${r.arrayLength}.`);if(i.some(l=>(Array.isArray(l)?l.length:1)!==r.length))throw new Error(`Tried to update ${s} with some elements that are not length ${r.length}.`);let h=new(r.type==="float"?Float32Array:Int32Array)(i.flat()),c=r.location;if(t?.startIndex){let l=this.gl.getUniformLocation(this.program,`${s}[${t.startIndex}]`);if(!l)throw new Error(`${s}[${t.startIndex}] not in shader. Did you pass an invalid startIndex?`);c=l}this.gl[o+"v"](c,h)}else{if(Array.isArray(i)||(i=[i]),i.length!==r.length)throw new Error(`Invalid uniform value length: ${i.length}. 
Expected ${r.length}.`);this.gl[o](r.location,...i)}}),this.emitHook("updateUniforms",...arguments)}createTexture(e,t){let{width:s,height:i}=t,r=t.history?.depth??0,o=this.gl.createTexture();if(!o)throw new Error("Failed to create texture");let n=t.unitIndex;if(typeof n!="number")try{n=this.reserveTextureUnit(e)}catch(m){throw this.gl.deleteTexture(o),m}let h=r>0,c=h?this.gl.TEXTURE_2D_ARRAY:this.gl.TEXTURE_2D,{options:l}=t;return this.gl.activeTexture(this.gl.TEXTURE0+n),this.gl.bindTexture(c,o),this.gl.texParameteri(c,this.gl.TEXTURE_WRAP_S,l.wrapS),this.gl.texParameteri(c,this.gl.TEXTURE_WRAP_T,l.wrapT),this.gl.texParameteri(c,this.gl.TEXTURE_MIN_FILTER,l.minFilter),this.gl.texParameteri(c,this.gl.TEXTURE_MAG_FILTER,l.magFilter),h?this.gl.texStorage3D(c,1,l.internalFormat,s,i,r):e===b&&this.gl.texImage2D(this.gl.TEXTURE_2D,0,l.internalFormat,s,i,0,l.format,l.type,null),{texture:o,unitIndex:n}}_initializeTexture(e,t,s){if(this.textures.has(e))throw new Error(`Texture '${x(e)}' is already initialized.`);let{history:i=0,...r}=s??{},{width:o,height:n}=y(t);if(!o||!n)throw new Error("Texture source must have valid dimensions");let h={width:o,height:n,options:this.resolveTextureOptions(r)};i>0&&(h.history={depth:i,writeIndex:0});let{texture:c,unitIndex:l}=this.createTexture(e,h),m={texture:c,unitIndex:l,...h};i>0&&(this.initializeUniform(`${x(e)}FrameOffset`,"int",0),this.clearHistoryTextureLayers(m)),this.textures.set(e,m),this.updateTexture(e,t);let u=this.gl.getUniformLocation(this.program,x(e));u&&this.gl.uniform1i(u,l)}initializeTexture(e,t,s){this._initializeTexture(e,t,s),this.emitHook("initializeTexture",...arguments)}updateTextures(e,t){Object.entries(e).forEach(([s,i])=>{this.updateTexture(s,i,t)}),this.emitHook("updateTextures",...arguments)}updateTexture(e,t,s){let i=this.textures.get(e);if(!i)throw new Error(`Texture '${x(e)}' is not initialized.`);if(t instanceof WebGLTexture){this.gl.activeTexture(this.gl.TEXTURE0+i.unitIndex),this.gl.bindTexture(this.gl.TEXTURE_2D,t);return}let r=t;if(t instanceof a){let u=t.textures.get(b);if(t.gl===this.gl){this.gl.activeTexture(this.gl.TEXTURE0+i.unitIndex),this.gl.bindTexture(this.gl.TEXTURE_2D,u.texture);return}let{width:p,height:T,options:{format:f,type:d}}=u,g=this.getPixelArray(d,p*T*4);t.gl.bindFramebuffer(t.gl.FRAMEBUFFER,t.intermediateFbo),t.gl.readPixels(0,0,p,T,f,d,g),t.gl.bindFramebuffer(t.gl.FRAMEBUFFER,null),r={data:g,width:p,height:T}}let{width:o,height:n}=y(r);if(!o||!n)return;let h="isPartial"in r&&r.isPartial;h||this.resizeTexture(e,o,n);let l=!("data"in r&&r.data)&&!i.options?.preserveY,m=this.gl.getParameter(this.gl.UNPACK_FLIP_Y_WEBGL);if(i.history){if(this.gl.activeTexture(this.gl.TEXTURE0+i.unitIndex),this.gl.bindTexture(this.gl.TEXTURE_2D_ARRAY,i.texture),!s?.skipHistoryWrite){this.gl.pixelStorei(this.gl.UNPACK_FLIP_Y_WEBGL,l),this.gl.texSubImage3D(this.gl.TEXTURE_2D_ARRAY,0,0,0,i.history.writeIndex,o,n,1,i.options.format,i.options.type,r.data??r),this.gl.pixelStorei(this.gl.UNPACK_FLIP_Y_WEBGL,m);let u=`${x(e)}FrameOffset`;this.updateUniforms({[u]:i.history.writeIndex}),i.history.writeIndex=(i.history.writeIndex+1)%i.history.depth}}else{if(this.gl.activeTexture(this.gl.TEXTURE0+i.unitIndex),this.gl.bindTexture(this.gl.TEXTURE_2D,i.texture),this.gl.pixelStorei(this.gl.UNPACK_FLIP_Y_WEBGL,l),h){let u=r;this.gl.texSubImage2D(this.gl.TEXTURE_2D,0,u.x??0,u.y??0,o,n,i.options.format,i.options.type,u.data)}else 
this.gl.texImage2D(this.gl.TEXTURE_2D,0,i.options.internalFormat,o,n,0,i.options.format,i.options.type,r.data??r);this.gl.pixelStorei(this.gl.UNPACK_FLIP_Y_WEBGL,m)}}draw(e){this.emitHook("beforeDraw",...arguments);let t=this.gl,s=t.drawingBufferWidth,i=t.drawingBufferHeight,r=this.textures.get(b);t.bindFramebuffer(t.FRAMEBUFFER,this.intermediateFbo),t.framebufferTexture2D(t.FRAMEBUFFER,t.COLOR_ATTACHMENT0,t.TEXTURE_2D,r.texture,0),t.useProgram(this.program),t.bindBuffer(t.ARRAY_BUFFER,this.buffer),t.vertexAttribPointer(this.aPositionLocation,2,t.FLOAT,!1,0,0),t.enableVertexAttribArray(this.aPositionLocation),t.viewport(0,0,s,i),e?.skipClear||t.clear(t.COLOR_BUFFER_BIT),t.drawArrays(t.TRIANGLES,0,6);let o=this.textures.get(v);o&&!e?.skipHistoryWrite&&(t.bindTexture(t.TEXTURE_2D_ARRAY,o.texture),t.copyTexSubImage3D(t.TEXTURE_2D_ARRAY,0,0,0,o.history.writeIndex,0,0,s,i)),t.bindFramebuffer(t.READ_FRAMEBUFFER,this.intermediateFbo),t.bindFramebuffer(t.DRAW_FRAMEBUFFER,null),t.blitFramebuffer(0,0,s,i,0,0,s,i,t.COLOR_BUFFER_BIT,t.NEAREST),t.bindFramebuffer(t.FRAMEBUFFER,null),this.emitHook("afterDraw",...arguments)}step(e,t){this.emitHook("beforeStep",...arguments);let s={};this.uniforms.has("u_time")&&(s.u_time=e),this.uniforms.has("u_frame")&&(s.u_frame=this.frame),this.updateUniforms(s),this.draw(t);let i=this.textures.get(v);if(i&&!t?.skipHistoryWrite){let{writeIndex:r,depth:o}=i.history;this.updateUniforms({[`${x(v)}FrameOffset`]:r}),i.history.writeIndex=(r+1)%o}++this.frame,this.emitHook("afterStep",...arguments)}play(e,t){this.pause();let s=i=>{i=(i-this.startTime)/1e3;let r=t?.(i,this.frame)??void 0;this.step(i,r),this.animationFrameId=requestAnimationFrame(s),e?.(i,this.frame)};this.animationFrameId=requestAnimationFrame(s),this.emitHook("play")}pause(){this.animationFrameId&&(cancelAnimationFrame(this.animationFrameId),this.animationFrameId=null),this.emitHook("pause")}reset(){this.frame=0,this.startTime=performance.now(),this.textures.forEach(e=>{e.history&&(e.history.writeIndex=0,this.clearHistoryTextureLayers(e))}),this.emitHook("reset")}destroy(){this.emitHook("destroy"),this.animationFrameId&&(cancelAnimationFrame(this.animationFrameId),this.animationFrameId=null),this.resolutionObserver.disconnect(),this.resizeObserver.disconnect(),this.canvas instanceof HTMLCanvasElement&&this.eventListeners.forEach((e,t)=>{this.canvas.removeEventListener(t,e)}),this.program&&this.gl.deleteProgram(this.program),this.intermediateFbo&&(this.gl.deleteFramebuffer(this.intermediateFbo),this.intermediateFbo=null),this.textures.forEach(e=>{this.gl.deleteTexture(e.texture)}),this.textureUnitPool.free=[],this.textureUnitPool.next=0,this.buffer&&(this.gl.deleteBuffer(this.buffer),this.buffer=null),this.isInternalCanvas&&this.canvas instanceof HTMLCanvasElement&&this.canvas.remove()}},_=E;export{_ as a};
+ //# sourceMappingURL=chunk-5CBGNOA3.mjs.map