three-mediapipe-rig 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +254 -0
- package/dist/module.d.ts +2 -0
- package/dist/module.d.ts.map +1 -0
- package/dist/three-mediapipe-rig.js +611 -0
- package/dist/three-mediapipe-rig.js.map +1 -0
- package/dist/tracking/BoneMapping.d.ts +54 -0
- package/dist/tracking/BoneMapping.d.ts.map +1 -0
- package/dist/tracking/FaceTracker.d.ts +40 -0
- package/dist/tracking/FaceTracker.d.ts.map +1 -0
- package/dist/tracking/HandTracker.d.ts +70 -0
- package/dist/tracking/HandTracker.d.ts.map +1 -0
- package/dist/tracking/PoseTracker.d.ts +57 -0
- package/dist/tracking/PoseTracker.d.ts.map +1 -0
- package/dist/tracking/Tracker.d.ts +36 -0
- package/dist/tracking/Tracker.d.ts.map +1 -0
- package/dist/tracking/util/cleanBoneName.d.ts +2 -0
- package/dist/tracking/util/cleanBoneName.d.ts.map +1 -0
- package/dist/tracking/util/getBoneByName.d.ts +3 -0
- package/dist/tracking/util/getBoneByName.d.ts.map +1 -0
- package/dist/tracking/util/getRootPosition.d.ts +10 -0
- package/dist/tracking/util/getRootPosition.d.ts.map +1 -0
- package/dist/tracking/util/lookAt.d.ts +11 -0
- package/dist/tracking/util/lookAt.d.ts.map +1 -0
- package/dist/tracking/util/vectorSign.d.ts +25 -0
- package/dist/tracking/util/vectorSign.d.ts.map +1 -0
- package/package.json +42 -0
package/README.md
ADDED
|
@@ -0,0 +1,254 @@
|
|
|
1
|
+
|
|
2
|
+

|
|
3
|
+
|
|
4
|
+
# three-mediapipe-rig
|
|
5
|
+
|
|
6
|
+
Integrate [Google MediaPipe](https://ai.google.dev/edge/mediapipe/solutions/guide) motion tracking with [Three.js](https://threejs.org/) skeletal rigs. Load a GLTF/GLB character, bind it, and drive its body, hands, and face from a webcam or video — in just a few lines of code.
|
|
7
|
+
|
|
8
|
+
This runs three models (face, body, and hands), so expect an FPS drop.
|
|
9
|
+
|
|
10
|
+
## Features
|
|
11
|
+
|
|
12
|
+
- **Full-body pose tracking** — shoulders, arms, hips, legs, and head
|
|
13
|
+
- **Hand tracking** — individual finger bones for both hands
|
|
14
|
+
- **Face tracking** — blendshape/morph target support for facial expressions and eye movement
|
|
15
|
+
- **Automatic bone binding** — maps MediaPipe landmarks to your rig's skeleton using a configurable bone-name map
|
|
16
|
+
- **Webcam & video input** — use a live webcam feed or a pre-recorded video for motion capture
|
|
17
|
+
- **Debug tools** — preview the video/image feed overlaid with landmark visualizations
|
|
18
|
+
|
|
19
|
+
## Installation
|
|
20
|
+
```
|
|
21
|
+
npm install three-mediapipe-rig
|
|
22
|
+
```
|
|
23
|
+
|
|
24
|
+
> **Peer dependency:** [three](https://www.npmjs.com/package/three) `^0.182.0` and [mediapipe](https://www.npmjs.com/package/@mediapipe/tasks-vision) `^0.10.32` must be installed in your project.
|
|
25
|
+
|
|
26
|
+
## Quick Start
|
|
27
|
+
|
|
28
|
+
```ts
|
|
29
|
+
// 1. Create your renderer
|
|
30
|
+
const renderer = new THREE.WebGPURenderer({ antialias: true });
|
|
31
|
+
renderer.setSize(window.innerWidth, window.innerHeight);
|
|
32
|
+
document.body.appendChild(renderer.domElement);
|
|
33
|
+
|
|
34
|
+
// 2. Initialize the tracker (loads MediaPipe models)
|
|
35
|
+
const tracker = await setupTracker({ /* ...config... */ });
|
|
36
|
+
|
|
37
|
+
const rig = scene.getObjectByName("rig")!;
|
|
38
|
+
|
|
39
|
+
// 3. Bind the rig to the tracker
|
|
40
|
+
const binding = tracker.bind(rig);
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
// 4. Start the webcam (must be initiated by a user-triggered event, like a click)
|
|
44
|
+
await tracker.start();
|
|
45
|
+
|
|
46
|
+
// 5. Update in your render loop
|
|
47
|
+
const clock = new THREE.Timer();
|
|
48
|
+
renderer.setAnimationLoop((time: number) => {
|
|
49
|
+
const delta = clock.update(time).getDelta();
|
|
50
|
+
|
|
51
|
+
// 6. update the skeleton...
|
|
52
|
+
binding?.update(delta);
|
|
53
|
+
|
|
54
|
+
renderer.render(scene, camera);
|
|
55
|
+
});
|
|
56
|
+
```
|
|
57
|
+
|
|
58
|
+
### Skeleton
|
|
59
|
+
You can use the skeleton provided in `rig.blend`, or use your own and pass a bone name mapping as the second argument of the `.bind` method so the module knows where the bones are. Pay attention to the bone roll of the provided skeleton, as it is the orientation expected by this module.
|
|
60
|
+
|
|
61
|
+
### Facial Animation
|
|
62
|
+
MediaPipe provides blend shape keys for the face (estimated from the webcam). The face is expected to be a separate mesh — just the face — with blend shape keys named like the ones provided by MediaPipe. See the [Blend Shape Keys reference](/face-blendshapekeys.md). You don't need to have all of them; keys that are not found will be ignored.
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
## API
|
|
66
|
+
|
|
67
|
+
### `setupTracker(config?)`
|
|
68
|
+
|
|
69
|
+
Initializes MediaPipe vision models and returns a tracker API object. This is an **async** function that downloads and loads the ML models.
|
|
70
|
+
|
|
71
|
+
```ts
|
|
72
|
+
const tracker = await setupTracker({
|
|
73
|
+
ignoreLegs: true,
|
|
74
|
+
displayScale: 0.2,
|
|
75
|
+
});
|
|
76
|
+
```
|
|
77
|
+
|
|
78
|
+
#### `TrackerConfig` options
|
|
79
|
+
|
|
80
|
+
| Option | Type | Default | Description |
|
|
81
|
+
|---|---|---|---|
|
|
82
|
+
| `ignoreLegs` | `boolean` | `false` | Skip leg tracking (useful for seated / upper-body-only characters) |
|
|
83
|
+
| `ignoreFace` | `boolean` | `false` | Skip face tracking entirely |
|
|
84
|
+
| `displayScale` | `number` | `1` | Scale of the debug video/canvas overlay |
|
|
85
|
+
| `debugVideo` | `string` | `undefined` | Path to a video file to use instead of the webcam |
|
|
86
|
+
| `debugFrame` | `string` | `undefined` | Path to a static image for single-frame debugging |
|
|
87
|
+
| `handsTrackerOptions` | `HandLandmarkerOptions` | `undefined` | Override [MediaPipe hand landmarker options](https://ai.google.dev/edge/mediapipe/solutions/vision/hand_landmarker/web_js#configuration_options) |
|
|
88
|
+
| `modelPaths` | `object` | *(CDN URLs)* | Custom URLs for the MediaPipe WASM & model files (see below) |
|
|
89
|
+
|
|
90
|
+
#### `modelPaths`
|
|
91
|
+
|
|
92
|
+
By default, models are loaded from Google's CDN. Override individual paths if you want to self-host the assets:
|
|
93
|
+
|
|
94
|
+
```ts
|
|
95
|
+
await setupTracker({
|
|
96
|
+
modelPaths: {
|
|
97
|
+
vision: "https://cdn.jsdelivr.net/npm/@mediapipe/tasks-vision@0.10.3/wasm",
|
|
98
|
+
pose: "/models/pose_landmarker_lite.task",
|
|
99
|
+
hand: "/models/hand_landmarker.task",
|
|
100
|
+
face: "/models/face_landmarker.task",
|
|
101
|
+
},
|
|
102
|
+
});
|
|
103
|
+
```
|
|
104
|
+
|
|
105
|
+
---
|
|
106
|
+
|
|
107
|
+
### Tracker API (return value of `setupTracker`)
|
|
108
|
+
|
|
109
|
+
The object returned by `setupTracker` exposes the following:
|
|
110
|
+
|
|
111
|
+
#### `tracker.start()` → `Promise<{ stop(): void }>`
|
|
112
|
+
|
|
113
|
+
Starts the webcam feed and begins real-time tracking. Returns a handle to stop the camera.
|
|
114
|
+
|
|
115
|
+
```ts
|
|
116
|
+
const camera = await tracker.start();
|
|
117
|
+
|
|
118
|
+
// Later, to stop:
|
|
119
|
+
camera.stop();
|
|
120
|
+
```
|
|
121
|
+
|
|
122
|
+
> Handles permission errors, missing cameras, and reconnection automatically with exponential backoff.
|
|
123
|
+
|
|
124
|
+
#### `tracker.bind(rig, boneMap?)` → `BindingHandler`
|
|
125
|
+
|
|
126
|
+
Binds a Three.js skeleton rig to the tracker. This is where the magic happens — it maps MediaPipe landmarks to your character's bones for **body**, **hands**, and **face** simultaneously.
|
|
127
|
+
|
|
128
|
+
```ts
|
|
129
|
+
const rig = gltf.scene.getObjectByName("rig")!;
|
|
130
|
+
const binding = tracker.bind(rig);
|
|
131
|
+
```
|
|
132
|
+
|
|
133
|
+
- **`rig`** — The root `Object3D` of your skeleton (the armature). It must contain child bones matching the expected naming convention.
|
|
134
|
+
- **`boneMap`** *(optional)* — A custom `BoneMap` object if your rig uses different bone names (see [Bone Naming](#bone-naming) below).
|
|
135
|
+
|
|
136
|
+
#### `BindingHandler.update(delta)`
|
|
137
|
+
|
|
138
|
+
Call this every frame in your render loop to apply the tracked motion to the bound rig. The `delta` parameter (in seconds) controls the interpolation smoothness.
|
|
139
|
+
|
|
140
|
+
```ts
|
|
141
|
+
renderer.setAnimationLoop((time) => {
|
|
142
|
+
const delta = clock.update(time).getDelta();
|
|
143
|
+
binding.update(delta);
|
|
144
|
+
renderer.render(scene, camera);
|
|
145
|
+
});
|
|
146
|
+
```
|
|
147
|
+
|
|
148
|
+
#### `tracker.poseTracker` / `tracker.handsTracker` / `tracker.faceTracker`
|
|
149
|
+
|
|
150
|
+
Direct access to the individual sub-trackers if you need lower-level control. Each has a `.root` property (a `THREE.Object3D`) you can add to your scene for debugging landmark positions.
|
|
151
|
+
|
|
152
|
+
```ts
|
|
153
|
+
// Visualize the hand tracking landmarks in the 3D scene
|
|
154
|
+
scene.add(tracker.handsTracker.left.root);
|
|
155
|
+
```
|
|
156
|
+
|
|
157
|
+
---
|
|
158
|
+
|
|
159
|
+
## Bone Naming
|
|
160
|
+
|
|
161
|
+
The library uses a **default bone map** that expects specific bone names in your rig. If your model uses different names, pass a custom `BoneMap` to `tracker.bind()`.
|
|
162
|
+
|
|
163
|
+
> Check the skeleton in `rig.blend` for the expected bone names, or append that rig to your project and use it as your skeleton.
|
|
164
|
+
|
|
165
|
+
### Default bone names
|
|
166
|
+
|
|
167
|
+
| Region | Bones |
|
|
168
|
+
|---|---|
|
|
169
|
+
| **Body** | `hips`, `torso`, `neck`, `head` |
|
|
170
|
+
| **Arms** | `upper_armL`, `forearmL`, `upper_armR`, `forearmR` |
|
|
171
|
+
| **Legs** | `thighL`, `shinL`, `footL`, `thighR`, `shinR`, `footR` |
|
|
172
|
+
| **Hands** | `handL`, `handR` |
|
|
173
|
+
| **Fingers (L/R)** | `index1L`–`index3L`, `middle1L`–`middle3L`, `ring1L`–`ring3L`, `pinky1L`–`pinky3L`, `thumb1L`–`thumb3L` *(same pattern for R)* |
|
|
174
|
+
| **Face** | Mesh named `face` (for blendshapes) |
|
|
175
|
+
|
|
176
|
+
### Custom bone map example
|
|
177
|
+
|
|
178
|
+
```ts
|
|
179
|
+
import type { BoneMap } from "three-mediapipe-rig";
|
|
180
|
+
|
|
181
|
+
// This tells the module the name of each expected bone in your rig.
|
|
182
|
+
const myBoneMap: BoneMap = {
|
|
183
|
+
faceMesh: "Head_Mesh",
|
|
184
|
+
head: "Head",
|
|
185
|
+
hips: "Hips",
|
|
186
|
+
neck: "Neck",
|
|
187
|
+
torso: "Spine1",
|
|
188
|
+
armL: "LeftArm",
|
|
189
|
+
forearmL: "LeftForeArm",
|
|
190
|
+
armR: "RightArm",
|
|
191
|
+
forearmR: "RightForeArm",
|
|
192
|
+
thighL: "LeftUpLeg",
|
|
193
|
+
shinL: "LeftLeg",
|
|
194
|
+
footL: "LeftFoot",
|
|
195
|
+
thighR: "RightUpLeg",
|
|
196
|
+
shinR: "RightLeg",
|
|
197
|
+
footR: "RightFoot",
|
|
198
|
+
handL: "LeftHand",
|
|
199
|
+
handR: "RightHand",
|
|
200
|
+
// ... finger bones ...
|
|
201
|
+
index1L: "LeftHandIndex1",
|
|
202
|
+
index2L: "LeftHandIndex2",
|
|
203
|
+
index3L: "LeftHandIndex3",
|
|
204
|
+
// (continue for all fingers)
|
|
205
|
+
};
|
|
206
|
+
|
|
207
|
+
const binding = tracker.bind(rig, myBoneMap);
|
|
208
|
+
```
|
|
209
|
+
|
|
210
|
+
---
|
|
211
|
+
|
|
212
|
+
## Multiple Characters
|
|
213
|
+
|
|
214
|
+
You can bind **multiple rigs** to the same tracker. All of them will mirror the tracked motion:
|
|
215
|
+
|
|
216
|
+
```ts
|
|
217
|
+
const laraBinding = tracker.bind(laraRig);
|
|
218
|
+
const robotBinding = tracker.bind(robotRig);
|
|
219
|
+
|
|
220
|
+
renderer.setAnimationLoop((time) => {
|
|
221
|
+
const delta = clock.update(time).getDelta();
|
|
222
|
+
laraBinding.update(delta);
|
|
223
|
+
robotBinding.update(delta);
|
|
224
|
+
renderer.render(scene, camera);
|
|
225
|
+
});
|
|
226
|
+
```
|
|
227
|
+
|
|
228
|
+
---
|
|
229
|
+
|
|
230
|
+
## Debugging with Video
|
|
231
|
+
|
|
232
|
+
During development, use a pre-recorded video instead of a live webcam:
|
|
233
|
+
|
|
234
|
+
```ts
|
|
235
|
+
const tracker = await setupTracker({
|
|
236
|
+
debugVideo: "test-video.mp4",
|
|
237
|
+
displayScale: 0.2, // small overlay in the corner
|
|
238
|
+
});
|
|
239
|
+
```
|
|
240
|
+
|
|
241
|
+
Or test against a single frame:
|
|
242
|
+
|
|
243
|
+
```ts
|
|
244
|
+
const tracker = await setupTracker({
|
|
245
|
+
debugFrame: "pose-reference.jpg",
|
|
246
|
+
displayScale: 0.5,
|
|
247
|
+
});
|
|
248
|
+
```
|
|
249
|
+
|
|
250
|
+
---
|
|
251
|
+
|
|
252
|
+
## License
|
|
253
|
+
|
|
254
|
+
MIT
|
package/dist/module.d.ts
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"module.d.ts","sourceRoot":"","sources":["../src/module.ts"],"names":[],"mappings":"AACA,cAAc,2BAA2B,CAAA"}
|