shaderpad 1.0.0-beta.38 → 1.0.0-beta.40

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -15,7 +15,6 @@ npm install shaderpad
  ```typescript
  import ShaderPad from 'shaderpad';

- // Your custom GLSL fragment shader code.
  const fragmentShaderSrc = `#version 300 es
  precision highp float;

@@ -35,43 +34,24 @@ void main() {
  	vec2 dotGrid = mod(uv, 50.) - 25.;
  	float dotDist = length(dotGrid);
  	float dot = step(dotDist, 5.);
-
  	float cursorDist = distance(uv, u_cursor * u_resolution);
  	float cursor = step(cursorDist, 25. + sin(u_time * 5.) * 5.);
-
  	vec3 color = mix(vec3(0., 0., 1.), vec3(1.), dot);
  	color = mix(color, u_cursorColor, cursor);
-
  	outColor = vec4(color, 1.);
  }
  `;

- // Initialize the shader.
  const shader = new ShaderPad(fragmentShaderSrc /* , options */);
-
- // Add your own custom uniforms.
  const getColor = (time: number) =>
  	[time, time + (Math.PI * 2) / 3, time + (Math.PI * 4) / 3].map(x => 1 + Math.sin(x) / 2);
  shader.initializeUniform('u_cursorColor', 'float', getColor(0));
-
- // Start the render loop.
  shader.play(time => {
  	shader.updateUniforms({ u_cursorColor: getColor(time) });
  });
-
- // Optionally pause or reset the render loop.
- // shader.pause();
- // shader.reset();
-
- // ShaderPad also attaches a throttled resize observer that you can hook into.
- // It fires when the canvas size changes visually. If you supplied a custom
- // canvas, you may use this to update its `width` and `height` attributes.
- // shader.onResize = (width, height) => {
- // 	console.log('Canvas resized:', width, height);
- // };
  ```

- See the [`examples/` directory](./examples/) for more.
+ See the [`examples/` directory](./examples/src) for more.

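The comments removed from the quick-start still describe live API surface: `pause()`, `reset()`, and the `onResize` hook are all documented later in this README. A compact sketch of that usage, reassembled from the removed lines:

```typescript
// Optionally pause or reset the render loop.
shader.pause();
shader.reset();

// ShaderPad attaches a throttled resize observer that fires when the canvas
// size changes visually. If you supplied a custom canvas, you can use this
// hook to update its `width` and `height` attributes.
shader.onResize = (width, height) => {
	console.log('Canvas resized:', width, height);
};
```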
  ## Usage
 
@@ -79,26 +59,26 @@ See the [`examples/` directory](./examples/) for more.

  #### `initializeUniform(name, type, value, options?)`

- Initialize a uniform variable. The uniform must be declared in your fragment shader.
+ Initialize a uniform, which must be declared in your shader.

  ```typescript
- // Initialize a float uniform.
  shader.initializeUniform('u_speed', 'float', 1.5);
-
- // Initialize a vec3 uniform.
+ // Vectors are passed as arrays. This is a vec3:
  shader.initializeUniform('u_color', 'float', [1.0, 0.5, 0.0]);
+ // …or, with the `arrayLength` option, a uniform array. This is an array of three floats:
+ shader.initializeUniform('u_data', 'float', [1.0, 0.5, 0.0], { arrayLength: 3 });
  ```

  **Parameters:**

- - `name` (string): The name of the uniform as declared in your shader
- - `type` ('float' | 'int'): The uniform type
+ - `name` (string): Uniform name
+ - `type` ('float' | 'int'): Uniform type
  - `value` (number | number[] | (number | number[])[]): Initial value(s)
- - `options` (optional): `{ arrayLength?: number }` - Required for uniform arrays
+ - `options` (optional): `{ arrayLength?: number }` - Required for arrays

  #### `updateUniforms(updates, options?)`

- Update one or more uniform values.
+ Update uniform values.

  ```typescript
  shader.updateUniforms({
@@ -109,7 +89,7 @@ shader.updateUniforms({

  **Parameters:**

- - `updates` (Record<string, number | number[] | (number | number[])[]>): Object mapping uniform names to their new values
+ - `updates`: Object mapping uniform names to their new values
  - `options` (optional): `{ startIndex?: number }` - Starting index for partial array updates

  #### Uniform arrays
@@ -125,12 +105,8 @@ shader.initializeUniform(
  		[0, 0],
  		[1, 1],
  		[2, 2],
- 		[3, 3],
- 		[4, 4],
  	],
- 	{
- 		arrayLength: 5,
- 	}
+ 	{ arrayLength: 3 }
  );

  // Update all elements.
@@ -139,8 +115,6 @@ shader.updateUniforms({
  		[0.1, 0.2],
  		[1.1, 1.2],
  		[2.1, 2.2],
- 		[3.1, 3.2],
- 		[4.1, 4.2],
  	],
  });
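Partial updates use the `startIndex` option documented above. A minimal sketch, assuming the three-element array uniform in this example is named `u_points` (its actual name falls in lines elided from this diff):

```typescript
// Overwrite elements 1 and 2, leaving element 0 untouched.
shader.updateUniforms(
	{
		u_points: [
			[1.5, 1.5],
			[2.5, 2.5],
		],
	},
	{ startIndex: 1 }
);
```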
 
@@ -178,25 +152,13 @@ shader.updateUniforms(

  #### `initializeTexture(name, source, options?)`

- Initialize a texture from an image, video, canvas element, or typed array.
+ Initialize a texture from an image, video, canvas, or typed array.

  ```typescript
- // Initialize a texture from an image.
- const img = new Image();
- img.src = 'texture.png';
- img.onload = () => {
- 	shader.initializeTexture('u_texture', img);
- };
-
- // Initialize a texture from a typed array (Float32Array, Uint8Array, etc.).
- const data = new Float32Array(width * height * 4); // RGBA data
+ shader.initializeTexture('u_texture', img); // e.g. a loaded HTMLImageElement
  shader.initializeTexture(
  	'u_custom',
- 	{
- 		data,
- 		width,
- 		height,
- 	},
+ 	{ data: new Float32Array(width * height * 4), width, height },
  	{
  		internalFormat: gl.RGBA32F,
  		type: gl.FLOAT,
@@ -204,19 +166,15 @@ shader.initializeTexture(
  		magFilter: gl.NEAREST,
  	}
  );
-
- // Initialize a texture with history (stores previous frames).
  shader.initializeTexture('u_webcam', videoElement, { history: 30 });
-
- // Preserve Y orientation for DOM sources (don't flip vertically).
  shader.initializeTexture('u_canvas', canvasElement, { preserveY: true });
  ```

  **Parameters:**

  - `name` (string): The name of the texture uniform as declared in your shader
- - `source` (HTMLImageElement | HTMLVideoElement | HTMLCanvasElement | CustomTexture): The texture source
  - `options` (optional): Texture options (see below)
+ - `source`: Image, video, canvas, or `{ data, width, height }`

  **Texture Options:**

@@ -232,52 +190,85 @@ shader.initializeTexture('u_canvas', canvasElement, { preserveY: true });

  **Note:** For typed array sources (`CustomTexture`), you must provide data in bottom-up orientation (WebGL convention). The `preserveY` option is ignored for typed arrays.

- #### `updateTextures(updates)`
-
- Update one or more textures. Useful for updating video textures each frame.
+ #### `updateTextures(updates, options?)`

  ```typescript
+ shader.updateTextures({ u_webcam: videoElement, u_overlay: overlayCanvas });
  shader.updateTextures({
- 	u_webcam: videoElement,
- 	u_overlay: overlayCanvas,
- 	u_custom: {
- 		data: typedArray,
- 		width,
- 		height,
- 	},
- });
-
- // Typed arrays can be partially updated.
- shader.updateTextures({
- 	u_custom: {
- 		data: partialData,
- 		width: regionWidth,
- 		height: regionHeight,
- 		isPartial: true,
- 		x: offsetX,
- 		y: offsetY,
- 	},
+ 	u_custom: { data: partialData, width, height, isPartial: true, x: offsetX, y: offsetY },
  });
+ shader.updateTextures({ u_camera: videoElement }, { skipHistoryWrite: true });
  ```

  **Parameters:**

- - `updates` (Record<string, TextureSource | PartialCustomTexture>): Object mapping texture names to their new sources
+ - `updates`: Object mapping texture names to updated sources
+ - `options?` (optional): `{ skipHistoryWrite?: boolean }`

  ### Lifecycle methods

- #### `play(callback?)`
+ #### `play(onStepComplete?, setStepOptions?)`

- Start the render loop. The callback is invoked each frame with the current time and frame number.
+ Start the render loop.

  ```typescript
  shader.play();
- // Can optionally take a callback to invoke each frame.
+
+ // With per-frame callbacks.
  shader.play((time, frame) => {
  	shader.updateTextures({ u_webcam: videoElement });
  });
+
+ shader.play(null, (time, frame) => {
+ 	// Only save every 10th frame to history.
+ 	return { skipHistoryWrite: frame % 10 !== 0 };
+ });
+ ```
+
+ **Parameters:**
+
+ - `onStepComplete?`: `(time: number, frame: number) => void` - Called after each frame
+ - `setStepOptions?`: `(time: number, frame: number) => StepOptions | void` - Called before each frame
+
+ #### `step(time, options?)`
+
+ Manually advance one frame. Updates `u_time` and `u_frame`, then renders.
+
+ ```typescript
+ shader.step(5.0); // Render at 5 seconds.
+ ```
+
+ **Parameters:**
+
+ - `time` (number): The current time in seconds
+ - `options?` (optional): `StepOptions` - Options to control rendering behavior (see below)
+
+ #### `draw(options?)`
+
+ Render without updating uniforms or frame counter.
+
+ ```typescript
+ shader.draw({ skipClear: true });
+ ```
+
+ **Parameters:**
+
+ - `options?` (optional): `StepOptions` - Options to control rendering behavior (see below)
+
+ #### `StepOptions`
+
+ ```typescript
+ interface StepOptions {
+ 	skipClear?: boolean; // Skip clearing canvas before rendering
+ 	skipHistoryWrite?: boolean; // Skip writing to history buffers
+ }
  ```

+ **Options:**
+
+ - `skipClear?: boolean` - If `true`, the canvas is not cleared before rendering. Useful for accumulating effects or multi-pass rendering.
+ - `skipHistoryWrite?: boolean` - If `true`, history buffers are not updated. Useful when you want to render without affecting the history state.
-
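For orientation, a sketch of how `step()` and `draw()` compose with `StepOptions`, e.g. for deterministic offline rendering. The pause-first pattern and the fixed 60 fps timestep are assumptions, not documented API:

```typescript
// Drive 300 frames manually instead of using the internal rAF loop.
shader.pause();
for (let frame = 0; frame < 300; frame++) {
	// Write to history only once every 10 frames (see StepOptions above).
	shader.step(frame / 60, { skipHistoryWrite: frame % 10 !== 0 });
}
// Re-render the final state without advancing u_time or u_frame,
// compositing over the existing canvas contents instead of clearing.
shader.draw({ skipClear: true });
```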
  #### `pause()`, `reset()`, `destroy()`

  ```typescript
@@ -310,7 +301,7 @@ shader.onResize = (width, height) => {

  ### history

- The `history` option enables framebuffer history, allowing you to access previous frames in your shader. Pass a number to specify how many frames to keep.
+ Enable framebuffer history to access previous frames.

  ```typescript
  // Store the last 10 frames of shader output.
@@ -353,380 +344,149 @@ vec4 historyColor = texture(u_webcam, vec3(v_uv, zIndex));

  ### debug

- The `debug` option controls whether debug logging is enabled. When enabled, ShaderPad will log warnings when uniforms or textures are not found in the shader. Defaults to `true` in development (when `process.env.NODE_ENV !== 'production'`) and `false` in production builds.
+ Enable debug logging. Defaults to `true` in development, `false` in production.

  ```typescript
- const shader = new ShaderPad(fragmentShaderSrc, { debug: true }); // Explicitly enable debug logging.
+ const shader = new ShaderPad(fragmentShaderSrc, { debug: true });
  ```

  ### plugins

- ShaderPad supports plugins to add additional functionality. Plugins are imported from separate paths to keep bundle sizes small.
+ Plugins add additional functionality. Imported from separate paths to keep bundle sizes small.

  #### helpers

- The `helpers` plugin provides convenience functions and constants. See [helpers.glsl](./src/plugins/helpers.glsl) for the implementation.
+ Convenience functions and constants. See [helpers.glsl](./src/plugins/helpers.glsl).

  ```typescript
- import ShaderPad from 'shaderpad';
  import helpers from 'shaderpad/plugins/helpers';
-
- const shader = new ShaderPad(fragmentShaderSrc, {
- 	plugins: [helpers()],
- });
+ const shader = new ShaderPad(fragmentShaderSrc, { plugins: [helpers()] });
  ```

- **Note:** The `helpers` plugin automatically injects the `u_resolution` uniform into your shader. Do not declare it yourself.
+ **Note:** Automatically injects `u_resolution`. Don't declare it yourself.

  #### save

- The `save` plugin adds a `.save()` method to the shader that saves the current frame to a PNG file. It works on desktop and mobile.
+ Adds a `.save()` method that saves the current frame as a PNG.

  ```typescript
- import ShaderPad from 'shaderpad';
  import save, { WithSave } from 'shaderpad/plugins/save';
-
  const shader = new ShaderPad(fragmentShaderSrc, { plugins: [save()] }) as WithSave<ShaderPad>;
  shader.save('filename', 'Optional mobile share text');
  ```

  #### face

- The `face` plugin uses [MediaPipe](https://ai.google.dev/edge/mediapipe/solutions/vision/face_landmarker) to detect faces in video or image textures.
+ Uses [MediaPipe Face Landmarker](https://ai.google.dev/edge/mediapipe/solutions/vision/face_landmarker) to detect faces.

  ```typescript
- import ShaderPad from 'shaderpad';
  import face from 'shaderpad/plugins/face';
-
  const shader = new ShaderPad(fragmentShaderSrc, {
- 	plugins: [
- 		face({
- 			textureName: 'u_webcam',
- 			options: { maxFaces: 3 },
- 		}),
- 	],
+ 	plugins: [face({ textureName: 'u_webcam', options: { maxFaces: 3 } })],
  });
  ```

- **Options:**
-
- - `onReady?: () => void` - Callback invoked when initialization is complete and the detection model is loaded
- - `onResults?: (results: FaceLandmarkerResult) => void` - Callback invoked with detection results each frame
-
- **Uniforms:**
-
- | Uniform              | Type      | Description                                                                  |
- | -------------------- | --------- | ---------------------------------------------------------------------------- |
- | `u_maxFaces`         | int       | Maximum number of faces to detect                                            |
- | `u_nFaces`           | int       | Current number of detected faces                                             |
- | `u_faceLandmarksTex` | sampler2D | Raw landmark data texture (use `faceLandmark()` to access)                   |
- | `u_faceMask`         | sampler2D | Face mask texture (R: region type, G: confidence, B: normalized face index)  |
-
- **Helper functions:**
-
- All region functions return `vec2(confidence, faceIndex)`. faceIndex is 0-indexed (-1 = no face).
-
- - `faceLandmark(int faceIndex, int landmarkIndex) -> vec4` - Returns landmark data as `vec4(x, y, z, visibility)`. Use `vec2(faceLandmark(...))` to get just the screen position.
- - `leftEyebrowAt(vec2 pos) -> vec2` - Returns `vec2(1.0, faceIndex)` if position is in left eyebrow, `vec2(0.0, -1.0)` otherwise.
- - `rightEyebrowAt(vec2 pos) -> vec2` - Returns `vec2(1.0, faceIndex)` if position is in right eyebrow, `vec2(0.0, -1.0)` otherwise.
- - `leftEyeAt(vec2 pos) -> vec2` - Returns `vec2(1.0, faceIndex)` if position is in left eye, `vec2(0.0, -1.0)` otherwise.
- - `rightEyeAt(vec2 pos) -> vec2` - Returns `vec2(1.0, faceIndex)` if position is in right eye, `vec2(0.0, -1.0)` otherwise.
- - `lipsAt(vec2 pos) -> vec2` - Returns `vec2(1.0, faceIndex)` if position is in lips, `vec2(0.0, -1.0)` otherwise.
- - `outerMouthAt(vec2 pos) -> vec2` - Returns `vec2(1.0, faceIndex)` if position is in outer mouth (lips + inner mouth), `vec2(0.0, -1.0)` otherwise.
- - `innerMouthAt(vec2 pos) -> vec2` - Returns `vec2(1.0, faceIndex)` if position is in inner mouth region, `vec2(0.0, -1.0)` otherwise.
- - `faceOvalAt(vec2 pos) -> vec2` - Returns `vec2(1.0, faceIndex)` if position is in face oval contour, `vec2(0.0, -1.0)` otherwise.
- - `faceAt(vec2 pos) -> vec2` - Returns `vec2(1.0, faceIndex)` if position is in face mesh or oval contour, `vec2(0.0, -1.0)` otherwise.
- - `eyeAt(vec2 pos) -> vec2` - Returns `vec2(1.0, faceIndex)` if position is in either eye, `vec2(0.0, -1.0)` otherwise.
- - `eyebrowAt(vec2 pos) -> vec2` - Returns `vec2(1.0, faceIndex)` if position is in either eyebrow, `vec2(0.0, -1.0)` otherwise.
-
- **Convenience functions** (return `1.0` if true, `0.0` if false):
+ **Options:** `onReady?: () => void`, `onResults?: (results: FaceLandmarkerResult) => void`

- - `inFace(vec2 pos) -> float` - Returns `1.0` if position is in face mesh, `0.0` otherwise.
- - `inEye(vec2 pos) -> float` - Returns `1.0` if position is in either eye, `0.0` otherwise.
- - `inEyebrow(vec2 pos) -> float` - Returns `1.0` if position is in either eyebrow, `0.0` otherwise.
- - `inOuterMouth(vec2 pos) -> float` - Returns `1.0` if position is in outer mouth (lips + inner mouth), `0.0` otherwise.
- - `inInnerMouth(vec2 pos) -> float` - Returns `1.0` if position is in inner mouth, `0.0` otherwise.
- - `inLips(vec2 pos) -> float` - Returns `1.0` if position is in lips, `0.0` otherwise.
+ **Uniforms:** `u_maxFaces` (int), `u_nFaces` (int), `u_faceLandmarksTex` (sampler2D), `u_faceMask` (sampler2D)

- **Landmark Constants:**
+ **Helper functions:** All region functions return `vec2(confidence, faceIndex)`. faceIndex is 0-indexed (-1 = no face).

- - `FACE_LANDMARK_L_EYE_CENTER` - Left eye center landmark index
- - `FACE_LANDMARK_R_EYE_CENTER` - Right eye center landmark index
- - `FACE_LANDMARK_NOSE_TIP` - Nose tip landmark index
- - `FACE_LANDMARK_FACE_CENTER` - Face center landmark index (custom, calculated from all landmarks)
- - `FACE_LANDMARK_MOUTH_CENTER` - Mouth center landmark index (custom, calculated from inner mouth landmarks)
+ - `faceLandmark(int faceIndex, int landmarkIndex) -> vec4` - Returns `vec4(x, y, z, visibility)`
+ - `leftEyebrowAt(vec2 pos) -> vec2`, `rightEyebrowAt(vec2 pos) -> vec2`
+ - `leftEyeAt(vec2 pos) -> vec2`, `rightEyeAt(vec2 pos) -> vec2`
+ - `lipsAt(vec2 pos) -> vec2`, `outerMouthAt(vec2 pos) -> vec2`, `innerMouthAt(vec2 pos) -> vec2`
+ - `faceOvalAt(vec2 pos) -> vec2`, `faceAt(vec2 pos) -> vec2`
+ - `eyeAt(vec2 pos) -> vec2`, `eyebrowAt(vec2 pos) -> vec2`

- **Example usage:**
+ **Convenience functions:** `inFace(vec2 pos) -> float`, `inEye(vec2 pos) -> float`, `inEyebrow(vec2 pos) -> float`, `inOuterMouth(vec2 pos) -> float`, `inInnerMouth(vec2 pos) -> float`, `inLips(vec2 pos) -> float`

- ```glsl
- // Get a specific landmark position.
- vec2 nosePos = vec2(faceLandmark(0, FACE_LANDMARK_NOSE_TIP));
-
- // Use in* convenience functions for simple boolean checks.
- float eyeMask = inEye(v_uv);
-
- // Use faceLandmark or *At functions when you need to check a specific face index.
- vec2 leftEye = leftEyeAt(v_uv);
- for (int i = 0; i < u_nFaces; ++i) {
- 	vec4 leftEyeCenter = faceLandmark(i, FACE_LANDMARK_L_EYE_CENTER);
- 	vec4 rightEyeCenter = faceLandmark(i, FACE_LANDMARK_R_EYE_CENTER);
- 	if (leftEye.x > 0.0 && int(leftEye.y) == i) {
- 		// Position is inside the left eye of face i.
- 	}
- 	// ...
- }
- ```
+ **Constants:** `FACE_LANDMARK_L_EYE_CENTER`, `FACE_LANDMARK_R_EYE_CENTER`, `FACE_LANDMARK_NOSE_TIP`, `FACE_LANDMARK_FACE_CENTER`, `FACE_LANDMARK_MOUTH_CENTER`

- [Landmark indices are documented here.](https://ai.google.dev/edge/mediapipe/solutions/vision/face_landmarker#face_landmarker_model) This library adds two custom landmarks: `FACE_CENTER` and `MOUTH_CENTER`. This brings the total landmark count to 480.
-
- **Note:** The face plugin requires `@mediapipe/tasks-vision` as a peer dependency.
+ **Note:** Requires `@mediapipe/tasks-vision` as a peer dependency. Adds two custom landmarks (`FACE_CENTER`, `MOUTH_CENTER`), bringing the total landmark count to 480.

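A sketch wiring the detection callbacks through the plugin's `options` bag, following the shape of the segmenter example further down; the webcam texture setup around it is an assumption:

```typescript
import ShaderPad from 'shaderpad';
import face from 'shaderpad/plugins/face';
import type { FaceLandmarkerResult } from '@mediapipe/tasks-vision';

const shader = new ShaderPad(fragmentShaderSrc, {
	plugins: [
		face({
			textureName: 'u_webcam',
			options: {
				maxFaces: 3,
				onReady: () => console.log('Face model loaded'),
				onResults: (results: FaceLandmarkerResult) => {
					// Inspect raw per-frame detection results on the JS side.
				},
			},
		}),
	],
});
shader.initializeTexture('u_webcam', videoElement);
shader.play(() => shader.updateTextures({ u_webcam: videoElement }));
```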
  #### pose

- The `pose` plugin uses [MediaPipe Pose Landmarker](https://ai.google.dev/edge/mediapipe/solutions/vision/pose_landmarker) to expose a flat array of 2D landmarks. Each pose contributes 39 landmarks (33 standard + 6 custom), enumerated below.
+ Uses [MediaPipe Pose Landmarker](https://ai.google.dev/edge/mediapipe/solutions/vision/pose_landmarker). Each pose contributes 39 landmarks (33 standard + 6 custom).

  ```typescript
- import ShaderPad from 'shaderpad';
  import pose from 'shaderpad/plugins/pose';
-
  const shader = new ShaderPad(fragmentShaderSrc, {
  	plugins: [pose({ textureName: 'u_video', options: { maxPoses: 3 } })],
  });
  ```

- **Options:**
-
- - `onReady?: () => void` - Callback invoked when initialization is complete and the detection model is loaded
- - `onResults?: (results: PoseLandmarkerResult) => void` - Callback invoked with detection results each frame
+ **Options:** `onReady?: () => void`, `onResults?: (results: PoseLandmarkerResult) => void`

- **Uniforms:**
-
- | Uniform              | Type      | Description                                                                    |
- | -------------------- | --------- | ------------------------------------------------------------------------------ |
- | `u_maxPoses`         | int       | Maximum number of poses to track                                               |
- | `u_nPoses`           | int       | Current number of detected poses                                               |
- | `u_poseLandmarksTex` | sampler2D | Raw landmark data texture (RGBA: x, y, z, visibility)                          |
- | `u_poseMask`         | sampler2D | Pose mask texture (R: body detected, G: confidence, B: normalized pose index)  |
+ **Uniforms:** `u_maxPoses` (int), `u_nPoses` (int), `u_poseLandmarksTex` (sampler2D), `u_poseMask` (sampler2D)

  **Helper functions:**

- - `poseLandmark(int poseIndex, int landmarkIndex) -> vec4` - Returns landmark data as `vec4(x, y, z, visibility)`. Use `vec2(poseLandmark(...))` to get just the screen position.
- - `poseAt(vec2 pos) -> vec2` - Returns `vec2(confidence, poseIndex)`. poseIndex is 0-indexed (-1 = no pose), confidence is the segmentation confidence.
- - `inPose(vec2 pos) -> float` - Returns `1.0` if position is in any pose, `0.0` otherwise.
-
- **Constants:**
-
- - `POSE_LANDMARK_LEFT_EYE` - Left eye landmark index (2)
- - `POSE_LANDMARK_RIGHT_EYE` - Right eye landmark index (5)
- - `POSE_LANDMARK_LEFT_SHOULDER` - Left shoulder landmark index (11)
- - `POSE_LANDMARK_RIGHT_SHOULDER` - Right shoulder landmark index (12)
- - `POSE_LANDMARK_LEFT_ELBOW` - Left elbow landmark index (13)
- - `POSE_LANDMARK_RIGHT_ELBOW` - Right elbow landmark index (14)
- - `POSE_LANDMARK_LEFT_HIP` - Left hip landmark index (23)
- - `POSE_LANDMARK_RIGHT_HIP` - Right hip landmark index (24)
- - `POSE_LANDMARK_LEFT_KNEE` - Left knee landmark index (25)
- - `POSE_LANDMARK_RIGHT_KNEE` - Right knee landmark index (26)
- - `POSE_LANDMARK_BODY_CENTER` - Body center landmark index (33, custom, calculated from all landmarks)
- - `POSE_LANDMARK_LEFT_HAND_CENTER` - Left hand center landmark index (34, custom, calculated from pinky, thumb, wrist, index)
- - `POSE_LANDMARK_RIGHT_HAND_CENTER` - Right hand center landmark index (35, custom, calculated from pinky, thumb, wrist, index)
- - `POSE_LANDMARK_LEFT_FOOT_CENTER` - Left foot center landmark index (36, custom, calculated from ankle, heel, foot index)
- - `POSE_LANDMARK_RIGHT_FOOT_CENTER` - Right foot center landmark index (37, custom, calculated from ankle, heel, foot index)
- - `POSE_LANDMARK_TORSO_CENTER` - Torso center landmark index (38, custom, calculated from shoulders and hips)
-
- **Note:** For connecting pose landmarks (e.g., drawing skeleton lines), `PoseLandmarker.POSE_CONNECTIONS` from `@mediapipe/tasks-vision` provides an array of `{ start, end }` pairs that define which landmarks should be connected.
-
- Use `poseLandmark(int poseIndex, int landmarkIndex)` in GLSL to retrieve a specific point. Landmark indices are:
-
- | Index | Landmark          | Index | Landmark                   |
- | ----- | ----------------- | ----- | -------------------------- |
- | 0     | nose              | 20    | right index                |
- | 1     | left eye (inner)  | 21    | left thumb                 |
- | 2     | left eye          | 22    | right thumb                |
- | 3     | left eye (outer)  | 23    | left hip                   |
- | 4     | right eye (inner) | 24    | right hip                  |
- | 5     | right eye         | 25    | left knee                  |
- | 6     | right eye (outer) | 26    | right knee                 |
- | 7     | left ear          | 27    | left ankle                 |
- | 8     | right ear         | 28    | right ankle                |
- | 9     | mouth (left)      | 29    | left heel                  |
- | 10    | mouth (right)     | 30    | right heel                 |
- | 11    | left shoulder     | 31    | left foot index            |
- | 12    | right shoulder    | 32    | right foot index           |
- | 13    | left elbow        | 33    | body center (custom)       |
- | 14    | right elbow       | 34    | left hand center (custom)  |
- | 15    | left wrist        | 35    | right hand center (custom) |
- | 16    | right wrist       | 36    | left foot center (custom)  |
- | 17    | left pinky        | 37    | right foot center (custom) |
- | 18    | right pinky       | 38    | torso center (custom)      |
- | 19    | left index        |       |                            |
-
- [Source](https://ai.google.dev/edge/mediapipe/solutions/vision/pose_landmarker#pose_landmarker_model)
-
- [Landmark indices are documented here.](https://ai.google.dev/edge/mediapipe/solutions/vision/pose_landmarker#pose_landmarker_model) This library adds six custom landmarks: `BODY_CENTER`, `LEFT_HAND_CENTER`, `RIGHT_HAND_CENTER`, `LEFT_FOOT_CENTER`, `RIGHT_FOOT_CENTER`, and `TORSO_CENTER`. This brings the total landmark count to 39.
-
- A minimal fragment shader loop looks like:
+ - `poseLandmark(int poseIndex, int landmarkIndex) -> vec4` - Returns `vec4(x, y, z, visibility)`
+ - `poseAt(vec2 pos) -> vec2` - Returns `vec2(confidence, poseIndex)`
+ - `inPose(vec2 pos) -> float`

- ```glsl
- for (int i = 0; i < u_maxPoses; ++i) {
- 	if (i >= u_nPoses) break;
- 	vec2 leftHip = vec2(poseLandmark(i, POSE_LANDMARK_LEFT_HIP));
- 	vec2 rightHip = vec2(poseLandmark(i, POSE_LANDMARK_RIGHT_HIP));
- 	// …
- }
- ```
+ **Constants:** `POSE_LANDMARK_LEFT_EYE`, `POSE_LANDMARK_RIGHT_EYE`, `POSE_LANDMARK_LEFT_SHOULDER`, `POSE_LANDMARK_RIGHT_SHOULDER`, `POSE_LANDMARK_LEFT_ELBOW`, `POSE_LANDMARK_RIGHT_ELBOW`, `POSE_LANDMARK_LEFT_HIP`, `POSE_LANDMARK_RIGHT_HIP`, `POSE_LANDMARK_LEFT_KNEE`, `POSE_LANDMARK_RIGHT_KNEE`, `POSE_LANDMARK_BODY_CENTER`, `POSE_LANDMARK_LEFT_HAND_CENTER`, `POSE_LANDMARK_RIGHT_HAND_CENTER`, `POSE_LANDMARK_LEFT_FOOT_CENTER`, `POSE_LANDMARK_RIGHT_FOOT_CENTER`, `POSE_LANDMARK_TORSO_CENTER`
+
+ **Note:** Requires `@mediapipe/tasks-vision`. Use `PoseLandmarker.POSE_CONNECTIONS` for skeleton connections. [Landmark indices](https://ai.google.dev/edge/mediapipe/solutions/vision/pose_landmarker#pose_landmarker_model)

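The note above points at `PoseLandmarker.POSE_CONNECTIONS` for skeleton lines. A sketch that flattens those `{ start, end }` pairs into a uniform array on the JS side; the `u_connections` name and the int-pair encoding are hypothetical, not part of the plugin:

```typescript
import { PoseLandmarker } from '@mediapipe/tasks-vision';

// POSE_CONNECTIONS is an array of { start, end } landmark-index pairs.
const pairs = PoseLandmarker.POSE_CONNECTIONS.map(({ start, end }) => [start, end]);
shader.initializeUniform('u_connections', 'int', pairs, { arrayLength: pairs.length });
```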
  #### hands

- The `hands` plugin uses [MediaPipe Hand Landmarker](https://ai.google.dev/edge/mediapipe/solutions/vision/hand_landmarker) to expose a flat array of 2D landmarks. Each hand contributes 22 landmarks, enumerated below.
+ Uses [MediaPipe Hand Landmarker](https://ai.google.dev/edge/mediapipe/solutions/vision/hand_landmarker). Each hand contributes 22 landmarks.

  ```typescript
- import ShaderPad from 'shaderpad';
  import hands from 'shaderpad/plugins/hands';
-
  const shader = new ShaderPad(fragmentShaderSrc, {
  	plugins: [hands({ textureName: 'u_video', options: { maxHands: 2 } })],
  });
  ```

- **Options:**
-
- - `onReady?: () => void` - Callback invoked when initialization is complete and the detection model is loaded
- - `onResults?: (results: HandLandmarkerResult) => void` - Callback invoked with detection results each frame
+ **Options:** `onReady?: () => void`, `onResults?: (results: HandLandmarkerResult) => void`

- **Uniforms:**
-
- | Uniform              | Type      | Description                                            |
- | -------------------- | --------- | ------------------------------------------------------ |
- | `u_maxHands`         | int       | Maximum number of hands to track                       |
- | `u_nHands`           | int       | Current number of detected hands                       |
- | `u_handLandmarksTex` | sampler2D | Raw landmark data texture (RGBA: x, y, z, handedness)  |
+ **Uniforms:** `u_maxHands` (int), `u_nHands` (int), `u_handLandmarksTex` (sampler2D)

  **Helper functions:**

- - `handLandmark(int handIndex, int landmarkIndex) -> vec4` - Returns landmark data as `vec4(x, y, z, handedness)`. Use `vec2(handLandmark(...))` to get just the screen position. Handedness: 0.0 = left hand, 1.0 = right hand.
- - `isRightHand(int handIndex) -> float` - Returns 1.0 if the hand is a right hand, 0.0 if left.
- - `isLeftHand(int handIndex) -> float` - Returns 1.0 if the hand is a left hand, 0.0 if right.
-
- **Landmark Indices:**
-
- | Index | Landmark          | Index | Landmark          |
- | ----- | ----------------- | ----- | ----------------- |
- | 0     | WRIST             | 11    | MIDDLE_FINGER_DIP |
- | 1     | THUMB_CMC         | 12    | MIDDLE_FINGER_TIP |
- | 2     | THUMB_MCP         | 13    | RING_FINGER_MCP   |
- | 3     | THUMB_IP          | 14    | RING_FINGER_PIP   |
- | 4     | THUMB_TIP         | 15    | RING_FINGER_DIP   |
- | 5     | INDEX_FINGER_MCP  | 16    | RING_FINGER_TIP   |
- | 6     | INDEX_FINGER_PIP  | 17    | PINKY_MCP         |
- | 7     | INDEX_FINGER_DIP  | 18    | PINKY_PIP         |
- | 8     | INDEX_FINGER_TIP  | 19    | PINKY_DIP         |
- | 9     | MIDDLE_FINGER_MCP | 20    | PINKY_TIP         |
- | 10    | MIDDLE_FINGER_PIP | 21    | HAND_CENTER       |
+ - `handLandmark(int handIndex, int landmarkIndex) -> vec4` - Returns `vec4(x, y, z, handedness)`. Handedness: 0.0 = left, 1.0 = right.
+ - `isRightHand(int handIndex) -> float`, `isLeftHand(int handIndex) -> float`

- [Source](https://ai.google.dev/edge/mediapipe/solutions/vision/hand_landmarker#models)
-
- A minimal fragment shader loop looks like:
-
- ```glsl
- #define WRIST 0
- #define THUMB_TIP 4
- #define INDEX_TIP 8
- #define HAND_CENTER 21
-
- for (int i = 0; i < u_maxHands; ++i) {
- 	if (i >= u_nHands) break;
- 	vec2 wrist = vec2(handLandmark(i, WRIST));
- 	vec2 thumbTip = vec2(handLandmark(i, THUMB_TIP));
- 	vec2 indexTip = vec2(handLandmark(i, INDEX_TIP));
- 	vec2 handCenter = vec2(handLandmark(i, HAND_CENTER));
-
- 	// Use handedness for coloring (0.0 = left/black, 1.0 = right/white).
- 	vec3 handColor = vec3(isRightHand(i));
- 	// …
- }
- ```
-
- **Note:** The hands plugin requires `@mediapipe/tasks-vision` as a peer dependency.
+ **Note:** Requires `@mediapipe/tasks-vision`. [Landmark indices](https://ai.google.dev/edge/mediapipe/solutions/vision/hand_landmarker#models)

  #### segmenter

- The `segmenter` plugin uses [MediaPipe Image Segmenter](https://ai.google.dev/edge/mediapipe/solutions/vision/image_segmenter) to segment objects in video or image textures. It supports models with multiple categories (e.g., background, hair, chair, dog…). By default, it uses the [hair segmentation model](https://ai.google.dev/edge/mediapipe/solutions/vision/image_segmenter#hair-model).
+ Uses [MediaPipe Image Segmenter](https://ai.google.dev/edge/mediapipe/solutions/vision/image_segmenter). Supports multi-category models. Defaults to the [hair segmentation model](https://ai.google.dev/edge/mediapipe/solutions/vision/image_segmenter#hair-model).

  ```typescript
- import ShaderPad from 'shaderpad';
  import segmenter from 'shaderpad/plugins/segmenter';
-
  const shader = new ShaderPad(fragmentShaderSrc, {
- 	plugins: [
- 		segmenter({
- 			textureName: 'u_webcam',
- 			options: {
- 				modelPath:
- 					'https://storage.googleapis.com/mediapipe-models/image_segmenter/selfie_multiclass_256x256/float32/latest/selfie_multiclass_256x256.tflite',
- 				outputCategoryMask: true,
- 				onReady: () => {
- 					console.log('Selfie multiclass model: loading complete');
- 				},
- 			},
- 		}),
- 	],
+ 	plugins: [segmenter({ textureName: 'u_webcam', options: { outputCategoryMask: true } })],
  });
  ```

- **Options:**
-
- - `onReady?: () => void` - Callback invoked when initialization is complete and the detection model is loaded
- - `onResults?: (results: ImageSegmenterResult) => void` - Callback invoked with segmentation results each frame
+ **Options:** `onReady?: () => void`, `onResults?: (results: ImageSegmenterResult) => void`

- **Uniforms:**
-
- | Uniform           | Type      | Description                                                              |
- | ----------------- | --------- | ------------------------------------------------------------------------ |
- | `u_segmentMask`   | sampler2D | Segment mask texture (R: normalized category, G: confidence, B: unused)  |
- | `u_numCategories` | int       | Number of segmentation categories (including background)                 |
+ **Uniforms:** `u_segmentMask` (sampler2D), `u_numCategories` (int)

  **Helper functions:**

- - `segmentAt(vec2 pos) -> vec2` - Returns `vec2(confidence, categoryIndex)`. categoryIndex is 0-indexed (-1 = background). confidence is the segmentation confidence (0-1).
-
- **Example usage:**
+ - `segmentAt(vec2 pos) -> vec2` - Returns `vec2(confidence, categoryIndex)`. categoryIndex is 0-indexed (-1 = background).

- ```glsl
- vec2 segment = segmentAt(v_uv);
- float confidence = segment.x; // Segmentation confidence
- float category = segment.y; // Category index (0-indexed, -1 = background)
-
- if (category >= 0.0) {
- 	// Apply effect based on confidence.
- 	color = mix(color, vec3(1.0, 0.0, 1.0), confidence);
- }
- ```
-
- **Note:** The segmenter plugin requires `@mediapipe/tasks-vision` as a peer dependency.
+ **Note:** Requires `@mediapipe/tasks-vision` as a peer dependency.

  ## Contributing

  ### Running an example

  ```bash
- # Clone the repository.
  git clone https://github.com/rileyjshaw/shaderpad.git
- cd shaderpad
-
- # Install dependencies and start the development server.
- cd examples
+ cd shaderpad/examples
  npm install
  npm run dev
  ```

- This will launch a local server. Open the provided URL (usually `http://localhost:5173`) in your browser to view and interact with the examples. Use the select box to view different examples.
-
  ### Adding an example

- - Add a new `.ts` file in `examples/src/`.
- - Follow the structure of an existing example as a template. The example must export an `init` function and a `destroy` function.
- - Add the example to the `demos` array in `examples/src/main.ts`.
- - If your example needs images or other assets, place them in `examples/public/` and reference them with a relative path.
+ Add a `.ts` file in `examples/src/` that exports `init` and `destroy` functions. Add it to the `demos` array in `examples/src/main.ts`.

  ## License