instance-rig 1.2.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32) hide show
  1. instance_rig-1.2.0/LICENSE +21 -0
  2. instance_rig-1.2.0/MANIFEST.in +5 -0
  3. instance_rig-1.2.0/PKG-INFO +162 -0
  4. instance_rig-1.2.0/README.md +101 -0
  5. instance_rig-1.2.0/instance_rig.egg-info/PKG-INFO +162 -0
  6. instance_rig-1.2.0/instance_rig.egg-info/SOURCES.txt +30 -0
  7. instance_rig-1.2.0/instance_rig.egg-info/dependency_links.txt +1 -0
  8. instance_rig-1.2.0/instance_rig.egg-info/entry_points.txt +2 -0
  9. instance_rig-1.2.0/instance_rig.egg-info/requires.txt +16 -0
  10. instance_rig-1.2.0/instance_rig.egg-info/top_level.txt +1 -0
  11. instance_rig-1.2.0/instancerig/BodyPixDetector.py +78 -0
  12. instance_rig-1.2.0/instancerig/BodyPixMapping.py +69 -0
  13. instance_rig-1.2.0/instancerig/InstanceRig.py +491 -0
  14. instance_rig-1.2.0/instancerig/__init__.py +0 -0
  15. instance_rig-1.2.0/instancerig/__main__.py +108 -0
  16. instance_rig-1.2.0/instancerig/_collada.py +60 -0
  17. instance_rig-1.2.0/instancerig/_gltf.py +169 -0
  18. instance_rig-1.2.0/instancerig/_io.py +12 -0
  19. instance_rig-1.2.0/instancerig/_math.py +163 -0
  20. instance_rig-1.2.0/instancerig/_pose_correction.py +154 -0
  21. instance_rig-1.2.0/instancerig/_preview.py +129 -0
  22. instance_rig-1.2.0/instancerig/_utils.py +58 -0
  23. instance_rig-1.2.0/instancerig/_vision.py +44 -0
  24. instance_rig-1.2.0/instancerig/app/AsyncInstanceRig.py +112 -0
  25. instance_rig-1.2.0/instancerig/app/__init__.py +0 -0
  26. instance_rig-1.2.0/instancerig/app/base_app.py +89 -0
  27. instance_rig-1.2.0/instancerig/model/Joint.py +121 -0
  28. instance_rig-1.2.0/instancerig/model/RemoteAsset.py +46 -0
  29. instance_rig-1.2.0/instancerig/model/Rig.py +11 -0
  30. instance_rig-1.2.0/instancerig/model/__init__.py +5 -0
  31. instance_rig-1.2.0/pyproject.toml +93 -0
  32. instance_rig-1.2.0/setup.cfg +4 -0
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 Florian Bruggisser
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,5 @@
1
+ include LICENSE
2
+ include README.md
3
+ include pyproject.toml
4
+ graft instancerig
5
+ global-exclude *.py[cod]
@@ -0,0 +1,162 @@
1
+ Metadata-Version: 2.4
2
+ Name: instance-rig
3
+ Version: 1.2.0
4
+ Summary: 3D human pose auto-rigging using 2D projections and pose detection.
5
+ Author-email: Florian Bruggisser <github@broox.ch>
6
+ License: MIT License
7
+
8
+ Copyright (c) 2026 Florian Bruggisser
9
+
10
+ Permission is hereby granted, free of charge, to any person obtaining a copy
11
+ of this software and associated documentation files (the "Software"), to deal
12
+ in the Software without restriction, including without limitation the rights
13
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
14
+ copies of the Software, and to permit persons to whom the Software is
15
+ furnished to do so, subject to the following conditions:
16
+
17
+ The above copyright notice and this permission notice shall be included in all
18
+ copies or substantial portions of the Software.
19
+
20
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
23
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
24
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
26
+ SOFTWARE.
27
+ Project-URL: Homepage, https://github.com/cansik/instance-rig
28
+ Project-URL: Repository, https://github.com/cansik/instance-rig.git
29
+ Classifier: Development Status :: 4 - Beta
30
+ Classifier: License :: OSI Approved :: MIT License
31
+ Classifier: Intended Audience :: Developers
32
+ Classifier: Intended Audience :: Education
33
+ Classifier: Intended Audience :: Science/Research
34
+ Classifier: Programming Language :: Python :: 3 :: Only
35
+ Classifier: Programming Language :: Python :: 3.9
36
+ Classifier: Programming Language :: Python :: 3.10
37
+ Classifier: Programming Language :: Python :: 3.11
38
+ Classifier: Programming Language :: Python :: 3.12
39
+ Classifier: Operating System :: MacOS :: MacOS X
40
+ Classifier: Operating System :: Microsoft :: Windows
41
+ Classifier: Operating System :: POSIX :: Linux
42
+ Classifier: Topic :: Multimedia
43
+ Classifier: Topic :: Multimedia :: Graphics :: 3D Modeling
44
+ Classifier: Topic :: Scientific/Engineering :: Image Processing
45
+ Requires-Python: >=3.10
46
+ Description-Content-Type: text/markdown
47
+ License-File: LICENSE
48
+ Requires-Dist: muke~=0.4.0
49
+ Requires-Dist: rich
50
+ Requires-Dist: tensorflow-macos; platform_system == "Darwin"
51
+ Requires-Dist: tensorflow-metal; platform_system == "Darwin"
52
+ Requires-Dist: tensorflow; platform_system != "Darwin"
53
+ Requires-Dist: tf-bodypix~=0.4.2
54
+ Requires-Dist: open3d>=0.16.0
55
+ Requires-Dist: opencv-python
56
+ Requires-Dist: numba
57
+ Requires-Dist: numpy
58
+ Requires-Dist: pycollada>=0.9.3
59
+ Requires-Dist: volumesh>=1.2.5
60
+ Dynamic: license-file
61
+
62
+ # instance-rig [![PyPI](https://img.shields.io/pypi/v/instance-rig)](https://pypi.org/project/instance-rig/)
63
+
64
+ Automated 3D humanoid rigging system that leverages 2D deep learning models to create functional skeletons and skin weights for static meshes.
65
+
66
+ <picture>
67
+ <source media="(prefers-color-scheme: dark)" srcset="documentation/process_dark.png">
68
+ <source media="(prefers-color-scheme: light)" srcset="documentation/process_bright.png">
69
+ <img src="documentation/process_bright.png" alt="Process">
70
+ </picture>
71
+
72
+ *Instance-rig process of skeleton and skin-weight generation.*
73
+
74
+ Instance-rig simplifies the complex task of 3D rigging by projecting it into the 2D domain. It uses a pre-trained 2D pose estimation model ([BodyPix](https://blog.tensorflow.org/2019/11/updated-bodypix-2.html)) to analyze a rendered view of the 3D mesh. The insights from the 2D analysis, such as joint locations and body part segmentation, are then mapped back onto the 3D geometry to construct a hierarchical skeleton and generate smooth skin weights.
75
+
76
+ The method is robust across a wide range of inputs and can operate on almost any mesh that exhibits a bipedal humanoid shape, without requiring manual cleanup or topology specific constraints. This makes it especially well suited for fast, fully automatic processing pipelines.
77
+
78
+ Instance-rig has been validated in large scale exhibition environments, where thousands of visitors were scanned, reconstructed, and rigged fully automatically. In these real world scenarios, the system consistently delivered complete, animation ready humanoid rigs in under one second for meshes captured in standard T-pose or A-pose configurations.
79
+
80
+ ## How it Works
81
+
82
+ The process is fully automated and consists of two main phases:
83
+
84
+ ### 1. Skeleton Construction
85
+ The system builds a hierarchical skeleton by identifying key anatomical landmarks.
86
+ * **2D Projection**: The 3D mesh is rendered into a 2D image from a frontal camera view.
87
+ * **Keypoint Detection**: BodyPix analyzes the image to find 2D pixel coordinates for 17 key joints (e.g., shoulders, knees).
88
+ * **3D Triangulation**: These 2D points are projected back into 3D space by ray-casting against the original mesh.
89
+ * **Hierarchy Generation**: Missing intermediate joints (like the spine and neck) are interpolated from the detected points to form a complete, parent-child bone hierarchy.
90
+
91
+ You can learn more about 3D keypoint detection in the [muke](https://github.com/cansik/multiview-3d-keypoint-detection/) repository.
92
+
93
+ ### 2. Skin Weight Generation
94
+ The mesh vertices are bound to the skeleton using a "weight painting" process derived from segmentation maps.
95
+ * **Part Segmentation**: BodyPix generates a 2D map identifying body parts (e.g., torso, left arm).
96
+ * **Refinement**: The system programmatically subdivides broad regions (like the torso) to align with the more detailed spinal joints of the generated skeleton.
97
+ * **Influence Mapping**: 3D vertices are mapped to the 2D segmentation map using UV coordinates (derived from world-space XY coordinates).
98
+ * **Weight Smoothing**: Binary influence maps for each joint are blurred to create soft gradients, ensuring smooth mesh deformation.
99
+ * **Binding**: Each vertex is assigned weights based on the blurred influence maps, linking it to the most relevant joints.
100
+
101
+ ## Installation
102
+
103
+ You can install the package directly via pip:
104
+
105
+ ```bash
106
+ pip install instance-rig
107
+ ```
108
+
109
+ ## Usage
110
+
111
+ To rig a 3D mesh (e.g., an `.obj` file), simply run the command line tool. The default output is a `.glb` file.
112
+
113
+ ```bash
114
+ instance-rig input_mesh.obj
115
+ ```
116
+
117
+ ### Options
118
+
119
+ * `--output <file>`: Specify the output file path. Supported formats are `.glb`, `.gltf`, and `.dae` (experimental, mesh-only).
120
+ * `--smooth-weights`: Enable additional smoothing for skin weights.
121
+ * `--smooth-weights-factor <int>`: Set the filter size for weight smoothing (default: 20).
122
+ * `--t-pose`: Applies correction to the mesh to be in T-Pose.
123
+ * `--debug`: Display debug frames and additional information during processing.
124
+ * `--skip-prediction`: Attempt to use cached prediction results if available.
125
+
126
+ ### Example
127
+
128
+ Rig a mesh and save it as a GLTF file with custom weight smoothing:
129
+
130
+ ```bash
131
+ instance-rig character.obj --output character.gltf --smooth-weights --smooth-weights-factor 30
132
+ ```
133
+
134
+ ### Gradio
135
+
136
+ To run instance-rig as a service, a basic Gradio interface is already available. It accepts zip files containing 3D assets (OBJ, MTL, and texture files), unpacks and processes the data, performs the automatic rigging, and returns the result as a zipped GLB file suitable for downstream use.
137
+
138
+ ```bash
139
+ uv run --group demo demo/gradio-demo.py
140
+ ```
141
+
142
+ ## Model Assets and Caching
143
+
144
+ The required model files are automatically downloaded on the first run and stored in a user-writable cache directory:
145
+ - **Linux/macOS**: `~/.cache/instancerig`
146
+ - **Windows**: `%LOCALAPPDATA%\instancerig`
147
+
148
+ You can override this location by setting the `INSTANCERIG_CACHE_DIR` environment variable. For offline environments, pre-download the model files and place them in this directory.
149
+
150
+ ## Limitations
151
+
152
+ * **Pose Assumption**: The system assumes the input mesh is centered and facing forward (typically +Z or -Z depending on coordinate system) in a T-pose or A-pose.
153
+ * **UV Mapping**: UV coordinates for the segmentation lookup are generated by normalizing the X and Y world coordinates. This works best for meshes that are aligned with the world axes.
154
+ * **Single View**: The analysis relies on a single frontal view, so occluded parts or complex poses may not be rigged correctly.
155
+ * **Rig Compatibility**: The generated rig does not follow common production standards such as Mixamo style skeletons. As a result, additional custom tooling or conversion steps are required to animate the output reliably in environments like Blender or Unity.
156
+ * **Collada Export**: The `.dae` export is currently experimental and only supports mesh data. Joints and skin weights are not included in the export.
157
+
158
+ ## Credits
159
+
160
+ Developed at the [Immersive Arts Space](https://blog.zhdk.ch/immersivearts/),
161
+ [Zurich University of the Arts (ZHdK)](https://www.zhdk.ch/).
162
+ Maintained by Florian Bruggisser.
@@ -0,0 +1,101 @@
1
+ # instance-rig [![PyPI](https://img.shields.io/pypi/v/instance-rig)](https://pypi.org/project/instance-rig/)
2
+
3
+ Automated 3D humanoid rigging system that leverages 2D deep learning models to create functional skeletons and skin weights for static meshes.
4
+
5
+ <picture>
6
+ <source media="(prefers-color-scheme: dark)" srcset="documentation/process_dark.png">
7
+ <source media="(prefers-color-scheme: light)" srcset="documentation/process_bright.png">
8
+ <img src="documentation/process_bright.png" alt="Process">
9
+ </picture>
10
+
11
+ *Instance-rig process of skeleton and skin-weight generation.*
12
+
13
+ Instance-rig simplifies the complex task of 3D rigging by projecting it into the 2D domain. It uses a pre-trained 2D pose estimation model ([BodyPix](https://blog.tensorflow.org/2019/11/updated-bodypix-2.html)) to analyze a rendered view of the 3D mesh. The insights from the 2D analysis, such as joint locations and body part segmentation, are then mapped back onto the 3D geometry to construct a hierarchical skeleton and generate smooth skin weights.
14
+
15
+ The method is robust across a wide range of inputs and can operate on almost any mesh that exhibits a bipedal humanoid shape, without requiring manual cleanup or topology specific constraints. This makes it especially well suited for fast, fully automatic processing pipelines.
16
+
17
+ Instance-rig has been validated in large scale exhibition environments, where thousands of visitors were scanned, reconstructed, and rigged fully automatically. In these real world scenarios, the system consistently delivered complete, animation ready humanoid rigs in under one second for meshes captured in standard T-pose or A-pose configurations.
18
+
19
+ ## How it Works
20
+
21
+ The process is fully automated and consists of two main phases:
22
+
23
+ ### 1. Skeleton Construction
24
+ The system builds a hierarchical skeleton by identifying key anatomical landmarks.
25
+ * **2D Projection**: The 3D mesh is rendered into a 2D image from a frontal camera view.
26
+ * **Keypoint Detection**: BodyPix analyzes the image to find 2D pixel coordinates for 17 key joints (e.g., shoulders, knees).
27
+ * **3D Triangulation**: These 2D points are projected back into 3D space by ray-casting against the original mesh.
28
+ * **Hierarchy Generation**: Missing intermediate joints (like the spine and neck) are interpolated from the detected points to form a complete, parent-child bone hierarchy.
29
+
30
+ You can learn more about 3D keypoint detection in the [muke](https://github.com/cansik/multiview-3d-keypoint-detection/) repository.
31
+
32
+ ### 2. Skin Weight Generation
33
+ The mesh vertices are bound to the skeleton using a "weight painting" process derived from segmentation maps.
34
+ * **Part Segmentation**: BodyPix generates a 2D map identifying body parts (e.g., torso, left arm).
35
+ * **Refinement**: The system programmatically subdivides broad regions (like the torso) to align with the more detailed spinal joints of the generated skeleton.
36
+ * **Influence Mapping**: 3D vertices are mapped to the 2D segmentation map using UV coordinates (derived from world-space XY coordinates).
37
+ * **Weight Smoothing**: Binary influence maps for each joint are blurred to create soft gradients, ensuring smooth mesh deformation.
38
+ * **Binding**: Each vertex is assigned weights based on the blurred influence maps, linking it to the most relevant joints.
39
+
40
+ ## Installation
41
+
42
+ You can install the package directly via pip:
43
+
44
+ ```bash
45
+ pip install instance-rig
46
+ ```
47
+
48
+ ## Usage
49
+
50
+ To rig a 3D mesh (e.g., an `.obj` file), simply run the command line tool. The default output is a `.glb` file.
51
+
52
+ ```bash
53
+ instance-rig input_mesh.obj
54
+ ```
55
+
56
+ ### Options
57
+
58
+ * `--output <file>`: Specify the output file path. Supported formats are `.glb`, `.gltf`, and `.dae` (experimental, mesh-only).
59
+ * `--smooth-weights`: Enable additional smoothing for skin weights.
60
+ * `--smooth-weights-factor <int>`: Set the filter size for weight smoothing (default: 20).
61
+ * `--t-pose`: Applies correction to the mesh to be in T-Pose.
62
+ * `--debug`: Display debug frames and additional information during processing.
63
+ * `--skip-prediction`: Attempt to use cached prediction results if available.
64
+
65
+ ### Example
66
+
67
+ Rig a mesh and save it as a GLTF file with custom weight smoothing:
68
+
69
+ ```bash
70
+ instance-rig character.obj --output character.gltf --smooth-weights --smooth-weights-factor 30
71
+ ```
72
+
73
+ ### Gradio
74
+
75
+ To run instance-rig as a service, a basic Gradio interface is already available. It accepts zip files containing 3D assets (OBJ, MTL, and texture files), unpacks and processes the data, performs the automatic rigging, and returns the result as a zipped GLB file suitable for downstream use.
76
+
77
+ ```bash
78
+ uv run --group demo demo/gradio-demo.py
79
+ ```
80
+
81
+ ## Model Assets and Caching
82
+
83
+ The required model files are automatically downloaded on the first run and stored in a user-writable cache directory:
84
+ - **Linux/macOS**: `~/.cache/instancerig`
85
+ - **Windows**: `%LOCALAPPDATA%\instancerig`
86
+
87
+ You can override this location by setting the `INSTANCERIG_CACHE_DIR` environment variable. For offline environments, pre-download the model files and place them in this directory.
88
+
89
+ ## Limitations
90
+
91
+ * **Pose Assumption**: The system assumes the input mesh is centered and facing forward (typically +Z or -Z depending on coordinate system) in a T-pose or A-pose.
92
+ * **UV Mapping**: UV coordinates for the segmentation lookup are generated by normalizing the X and Y world coordinates. This works best for meshes that are aligned with the world axes.
93
+ * **Single View**: The analysis relies on a single frontal view, so occluded parts or complex poses may not be rigged correctly.
94
+ * **Rig Compatibility**: The generated rig does not follow common production standards such as Mixamo style skeletons. As a result, additional custom tooling or conversion steps are required to animate the output reliably in environments like Blender or Unity.
95
+ * **Collada Export**: The `.dae` export is currently experimental and only supports mesh data. Joints and skin weights are not included in the export.
96
+
97
+ ## Credits
98
+
99
+ Developed at the [Immersive Arts Space](https://blog.zhdk.ch/immersivearts/),
100
+ [Zurich University of the Arts (ZHdK)](https://www.zhdk.ch/).
101
+ Maintained by Florian Bruggisser.
@@ -0,0 +1,162 @@
1
+ Metadata-Version: 2.4
2
+ Name: instance-rig
3
+ Version: 1.2.0
4
+ Summary: 3D human pose auto-rigging using 2D projections and pose detection.
5
+ Author-email: Florian Bruggisser <github@broox.ch>
6
+ License: MIT License
7
+
8
+ Copyright (c) 2026 Florian Bruggisser
9
+
10
+ Permission is hereby granted, free of charge, to any person obtaining a copy
11
+ of this software and associated documentation files (the "Software"), to deal
12
+ in the Software without restriction, including without limitation the rights
13
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
14
+ copies of the Software, and to permit persons to whom the Software is
15
+ furnished to do so, subject to the following conditions:
16
+
17
+ The above copyright notice and this permission notice shall be included in all
18
+ copies or substantial portions of the Software.
19
+
20
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
23
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
24
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
26
+ SOFTWARE.
27
+ Project-URL: Homepage, https://github.com/cansik/instance-rig
28
+ Project-URL: Repository, https://github.com/cansik/instance-rig.git
29
+ Classifier: Development Status :: 4 - Beta
30
+ Classifier: License :: OSI Approved :: MIT License
31
+ Classifier: Intended Audience :: Developers
32
+ Classifier: Intended Audience :: Education
33
+ Classifier: Intended Audience :: Science/Research
34
+ Classifier: Programming Language :: Python :: 3 :: Only
35
+ Classifier: Programming Language :: Python :: 3.9
36
+ Classifier: Programming Language :: Python :: 3.10
37
+ Classifier: Programming Language :: Python :: 3.11
38
+ Classifier: Programming Language :: Python :: 3.12
39
+ Classifier: Operating System :: MacOS :: MacOS X
40
+ Classifier: Operating System :: Microsoft :: Windows
41
+ Classifier: Operating System :: POSIX :: Linux
42
+ Classifier: Topic :: Multimedia
43
+ Classifier: Topic :: Multimedia :: Graphics :: 3D Modeling
44
+ Classifier: Topic :: Scientific/Engineering :: Image Processing
45
+ Requires-Python: >=3.10
46
+ Description-Content-Type: text/markdown
47
+ License-File: LICENSE
48
+ Requires-Dist: muke~=0.4.0
49
+ Requires-Dist: rich
50
+ Requires-Dist: tensorflow-macos; platform_system == "Darwin"
51
+ Requires-Dist: tensorflow-metal; platform_system == "Darwin"
52
+ Requires-Dist: tensorflow; platform_system != "Darwin"
53
+ Requires-Dist: tf-bodypix~=0.4.2
54
+ Requires-Dist: open3d>=0.16.0
55
+ Requires-Dist: opencv-python
56
+ Requires-Dist: numba
57
+ Requires-Dist: numpy
58
+ Requires-Dist: pycollada>=0.9.3
59
+ Requires-Dist: volumesh>=1.2.5
60
+ Dynamic: license-file
61
+
62
+ # instance-rig [![PyPI](https://img.shields.io/pypi/v/instance-rig)](https://pypi.org/project/instance-rig/)
63
+
64
+ Automated 3D humanoid rigging system that leverages 2D deep learning models to create functional skeletons and skin weights for static meshes.
65
+
66
+ <picture>
67
+ <source media="(prefers-color-scheme: dark)" srcset="documentation/process_dark.png">
68
+ <source media="(prefers-color-scheme: light)" srcset="documentation/process_bright.png">
69
+ <img src="documentation/process_bright.png" alt="Process">
70
+ </picture>
71
+
72
+ *Instance-rig process of skeleton and skin-weight generation.*
73
+
74
+ Instance-rig simplifies the complex task of 3D rigging by projecting it into the 2D domain. It uses a pre-trained 2D pose estimation model ([BodyPix](https://blog.tensorflow.org/2019/11/updated-bodypix-2.html)) to analyze a rendered view of the 3D mesh. The insights from the 2D analysis, such as joint locations and body part segmentation, are then mapped back onto the 3D geometry to construct a hierarchical skeleton and generate smooth skin weights.
75
+
76
+ The method is robust across a wide range of inputs and can operate on almost any mesh that exhibits a bipedal humanoid shape, without requiring manual cleanup or topology specific constraints. This makes it especially well suited for fast, fully automatic processing pipelines.
77
+
78
+ Instance-rig has been validated in large scale exhibition environments, where thousands of visitors were scanned, reconstructed, and rigged fully automatically. In these real world scenarios, the system consistently delivered complete, animation ready humanoid rigs in under one second for meshes captured in standard T-pose or A-pose configurations.
79
+
80
+ ## How it Works
81
+
82
+ The process is fully automated and consists of two main phases:
83
+
84
+ ### 1. Skeleton Construction
85
+ The system builds a hierarchical skeleton by identifying key anatomical landmarks.
86
+ * **2D Projection**: The 3D mesh is rendered into a 2D image from a frontal camera view.
87
+ * **Keypoint Detection**: BodyPix analyzes the image to find 2D pixel coordinates for 17 key joints (e.g., shoulders, knees).
88
+ * **3D Triangulation**: These 2D points are projected back into 3D space by ray-casting against the original mesh.
89
+ * **Hierarchy Generation**: Missing intermediate joints (like the spine and neck) are interpolated from the detected points to form a complete, parent-child bone hierarchy.
90
+
91
+ You can learn more about 3D keypoint detection in the [muke](https://github.com/cansik/multiview-3d-keypoint-detection/) repository.
92
+
93
+ ### 2. Skin Weight Generation
94
+ The mesh vertices are bound to the skeleton using a "weight painting" process derived from segmentation maps.
95
+ * **Part Segmentation**: BodyPix generates a 2D map identifying body parts (e.g., torso, left arm).
96
+ * **Refinement**: The system programmatically subdivides broad regions (like the torso) to align with the more detailed spinal joints of the generated skeleton.
97
+ * **Influence Mapping**: 3D vertices are mapped to the 2D segmentation map using UV coordinates (derived from world-space XY coordinates).
98
+ * **Weight Smoothing**: Binary influence maps for each joint are blurred to create soft gradients, ensuring smooth mesh deformation.
99
+ * **Binding**: Each vertex is assigned weights based on the blurred influence maps, linking it to the most relevant joints.
100
+
101
+ ## Installation
102
+
103
+ You can install the package directly via pip:
104
+
105
+ ```bash
106
+ pip install instance-rig
107
+ ```
108
+
109
+ ## Usage
110
+
111
+ To rig a 3D mesh (e.g., an `.obj` file), simply run the command line tool. The default output is a `.glb` file.
112
+
113
+ ```bash
114
+ instance-rig input_mesh.obj
115
+ ```
116
+
117
+ ### Options
118
+
119
+ * `--output <file>`: Specify the output file path. Supported formats are `.glb`, `.gltf`, and `.dae` (experimental, mesh-only).
120
+ * `--smooth-weights`: Enable additional smoothing for skin weights.
121
+ * `--smooth-weights-factor <int>`: Set the filter size for weight smoothing (default: 20).
122
+ * `--t-pose`: Applies correction to the mesh to be in T-Pose.
123
+ * `--debug`: Display debug frames and additional information during processing.
124
+ * `--skip-prediction`: Attempt to use cached prediction results if available.
125
+
126
+ ### Example
127
+
128
+ Rig a mesh and save it as a GLTF file with custom weight smoothing:
129
+
130
+ ```bash
131
+ instance-rig character.obj --output character.gltf --smooth-weights --smooth-weights-factor 30
132
+ ```
133
+
134
+ ### Gradio
135
+
136
+ To run instance-rig as a service, a basic Gradio interface is already available. It accepts zip files containing 3D assets (OBJ, MTL, and texture files), unpacks and processes the data, performs the automatic rigging, and returns the result as a zipped GLB file suitable for downstream use.
137
+
138
+ ```bash
139
+ uv run --group demo demo/gradio-demo.py
140
+ ```
141
+
142
+ ## Model Assets and Caching
143
+
144
+ The required model files are automatically downloaded on the first run and stored in a user-writable cache directory:
145
+ - **Linux/macOS**: `~/.cache/instancerig`
146
+ - **Windows**: `%LOCALAPPDATA%\instancerig`
147
+
148
+ You can override this location by setting the `INSTANCERIG_CACHE_DIR` environment variable. For offline environments, pre-download the model files and place them in this directory.
149
+
150
+ ## Limitations
151
+
152
+ * **Pose Assumption**: The system assumes the input mesh is centered and facing forward (typically +Z or -Z depending on coordinate system) in a T-pose or A-pose.
153
+ * **UV Mapping**: UV coordinates for the segmentation lookup are generated by normalizing the X and Y world coordinates. This works best for meshes that are aligned with the world axes.
154
+ * **Single View**: The analysis relies on a single frontal view, so occluded parts or complex poses may not be rigged correctly.
155
+ * **Rig Compatibility**: The generated rig does not follow common production standards such as Mixamo style skeletons. As a result, additional custom tooling or conversion steps are required to animate the output reliably in environments like Blender or Unity.
156
+ * **Collada Export**: The `.dae` export is currently experimental and only supports mesh data. Joints and skin weights are not included in the export.
157
+
158
+ ## Credits
159
+
160
+ Developed at the [Immersive Arts Space](https://blog.zhdk.ch/immersivearts/),
161
+ [Zurich University of the Arts (ZHdK)](https://www.zhdk.ch/).
162
+ Maintained by Florian Bruggisser.
@@ -0,0 +1,30 @@
1
+ LICENSE
2
+ MANIFEST.in
3
+ README.md
4
+ pyproject.toml
5
+ instance_rig.egg-info/PKG-INFO
6
+ instance_rig.egg-info/SOURCES.txt
7
+ instance_rig.egg-info/dependency_links.txt
8
+ instance_rig.egg-info/entry_points.txt
9
+ instance_rig.egg-info/requires.txt
10
+ instance_rig.egg-info/top_level.txt
11
+ instancerig/BodyPixDetector.py
12
+ instancerig/BodyPixMapping.py
13
+ instancerig/InstanceRig.py
14
+ instancerig/__init__.py
15
+ instancerig/__main__.py
16
+ instancerig/_collada.py
17
+ instancerig/_gltf.py
18
+ instancerig/_io.py
19
+ instancerig/_math.py
20
+ instancerig/_pose_correction.py
21
+ instancerig/_preview.py
22
+ instancerig/_utils.py
23
+ instancerig/_vision.py
24
+ instancerig/app/AsyncInstanceRig.py
25
+ instancerig/app/__init__.py
26
+ instancerig/app/base_app.py
27
+ instancerig/model/Joint.py
28
+ instancerig/model/RemoteAsset.py
29
+ instancerig/model/Rig.py
30
+ instancerig/model/__init__.py
@@ -0,0 +1,2 @@
1
+ [console_scripts]
2
+ instance-rig = instancerig.__main__:main
@@ -0,0 +1,16 @@
1
+ muke~=0.4.0
2
+ rich
3
+ tf-bodypix~=0.4.2
4
+ open3d>=0.16.0
5
+ opencv-python
6
+ numba
7
+ numpy
8
+ pycollada>=0.9.3
9
+ volumesh>=1.2.5
10
+
11
+ [:platform_system != "Darwin"]
12
+ tensorflow
13
+
14
+ [:platform_system == "Darwin"]
15
+ tensorflow-macos
16
+ tensorflow-metal
@@ -0,0 +1 @@
1
+ instancerig
@@ -0,0 +1,78 @@
1
+ import os
2
+ from pathlib import Path
3
+ from typing import Optional, List
4
+
5
+ import cv2
6
+ import numpy as np
7
+ from muke.detector.BaseDetector import BaseDetector
8
+ from muke.detector.KeyPoint2 import KeyPoint2
9
+
10
+ from instancerig._utils import get_cache_dir
11
+ from instancerig.model.RemoteAsset import RemoteAsset
12
+
13
+ os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
14
+
15
+ from tf_bodypix.model import BodyPixModelWrapper, BodyPixResultWrapper, ModelArchitectureNames
16
+ from tf_bodypix.api import load_model
17
+
18
+
19
class BodyPixTensorflowModel(RemoteAsset):
    """Remote BodyPix TensorFlow SavedModel asset.

    Resolves the download URL for a named BodyPix model and the local cache
    location where its ``saved_model.pb`` is stored. The containing directory
    (rather than the ``.pb`` file itself) is what TensorFlow loads.
    """

    BODY_PIX_REPOSITORY_URL = "https://github.com/cansik/data-storage/releases/download/bodypix"

    def __init__(self, model_name: str):
        """Create the asset descriptor for *model_name*.

        :param model_name: Name of the BodyPix model release artifact
            (without the ``.pb`` extension).
        """
        remote_url = f"{self.BODY_PIX_REPOSITORY_URL}/{model_name}.pb"
        # The downloaded file is stored as <cache>/<model_name>/saved_model.pb
        # so that the parent folder forms a TensorFlow SavedModel directory.
        cache_file = get_cache_dir().joinpath(model_name, "saved_model.pb")
        super().__init__(model_name, remote_url, cache_file)

    def get_path(self) -> Path:
        """Return the SavedModel *directory* containing the cached ``.pb`` file."""
        pb_file = super().get_path()
        return pb_file.parent
32
+
33
+
34
# Default model asset: BodyPix with ResNet50 backbone, output stride 16,
# presumably trained/exported for 480x640 input (inferred from the name).
BODY_PIX_RESNET_S16_480x640_TF = BodyPixTensorflowModel("bodypix-resnet50-s16-480x640")
35
+
36
+
37
class BodyPixDetector(BaseDetector):
    """Muke keypoint detector backed by the TensorFlow BodyPix model.

    Besides returning 2D keypoints, the detector caches the first processed
    image and the first raw BodyPix result so callers can later read the
    part-segmentation data of the initial view.
    """

    def __init__(self):
        # get_path() triggers the model download on first use (see RemoteAsset).
        model_path = BODY_PIX_RESNET_S16_480x640_TF.get_path()
        self.bodypix_model: BodyPixModelWrapper = load_model(
            str(model_path), output_stride=16, architecture_name=ModelArchitectureNames.RESNET_50
        )

        # First processed image (RGB) and its raw BodyPix result;
        # populated lazily by detect() and cleared via reset().
        self.image: Optional[np.ndarray] = None
        self.result: Optional[BodyPixResultWrapper] = None

    def setup(self):
        # Nothing to prepare: the model is loaded eagerly in __init__.
        pass

    def release(self):
        # No resources to free explicitly.
        pass

    def reset(self):
        """Clear the cached first image and prediction result."""
        self.image = None
        self.result = None

    def detect(self, image: np.ndarray) -> List[KeyPoint2]:
        """Detect 2D body keypoints in a single image.

        :param image: Input image in BGR channel order (OpenCV convention).
        :return: Keypoints of the first detected pose, with coordinates
            normalized to [0, 1] relative to the image width/height.
        :raises IndexError: If BodyPix does not detect any pose.
        """
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        result = self.bodypix_model.predict_single(image)

        poses = result.get_poses()
        if not poses:
            # Fail with a descriptive message instead of an opaque poses[0] crash.
            raise IndexError("BodyPix did not detect any pose in the input image.")
        pose = poses[0]

        h, w = image.shape[:2]

        # Sort by keypoint id so the output order is deterministic.
        landmarks: List[KeyPoint2] = []
        keypoints = sorted(pose.keypoints.items(), key=lambda x: x[0])
        for i, kp in keypoints:
            lm = KeyPoint2(i, kp.position.x / w, kp.position.y / h, kp.score)
            landmarks.append(lm)

        # Cache only the first image/result pair (image is stored as RGB).
        if self.result is None:
            self.result = result

        if self.image is None:
            self.image = image

        return landmarks
@@ -0,0 +1,69 @@
1
+ from tf_bodypix.bodypix_js_utils.keypoints import PART_NAMES
2
+ from tf_bodypix.bodypix_js_utils.part_channels import PART_CHANNELS
3
+
4
+
5
class BodyPixPoseMapping:
    """Indices of the 17 BodyPix pose keypoints.

    The numbering follows the BodyPix/PoseNet keypoint order, so these values
    can be used to index pose detection results directly.
    """

    # face
    Nose = 0
    LeftEye = 1
    RightEye = 2
    LeftEar = 3
    RightEar = 4

    # upper body
    LeftShoulder = 5
    RightShoulder = 6
    LeftElbow = 7
    RightElbow = 8
    LeftWrist = 9
    RightWrist = 10

    # lower body
    LeftHip = 11
    RightHip = 12
    LeftKnee = 13
    RightKnee = 14
    LeftAnkle = 15
    RightAnkle = 16

    def __iter__(self):
        # Yields (index, part_name) pairs in BodyPix keypoint order.
        return enumerate(PART_NAMES)
28
+
29
+
30
class BodyPixPartMapping:
    """Channel indices of the BodyPix body-part segmentation maps.

    Indices 0-23 follow the BodyPix part-channel order; 24-27 are extra
    segments synthesized by instance-rig for the spine and neck.
    """

    LeftFace = 0
    RightFace = 1
    LeftUpperArmFront = 2
    LeftUpperArmBack = 3
    RightUpperArmFront = 4
    RightUpperArmBack = 5
    LeftLowerArmFront = 6
    LeftLowerArmBack = 7
    RightLowerArmFront = 8
    RightLowerArmBack = 9
    LeftHand = 10
    RightHand = 11
    TorsoFront = 12
    TorsoBack = 13
    LeftUpperLegFront = 14
    LeftUpperLegBack = 15
    RightUpperLegFront = 16
    RightUpperLegBack = 17
    LeftLowerLegFront = 18
    LeftLowerLegBack = 19
    RightLowerLegFront = 20
    RightLowerLegBack = 21
    LeftFeet = 22
    RightFeet = 23

    # additional segmentation maps generated by instance-rig
    Neck = 24
    Spine1 = 25
    Spine2 = 26
    Spine3 = 27

    # sentinel for "no part assigned" — do not use as a real mapping
    NoMapping = 254

    def __iter__(self):
        # Yields (index, part_channel) pairs in BodyPix channel order.
        return enumerate(PART_CHANNELS)
67
+
68
+
69
# Lookup table from BodyPix keypoint index to its part name.
# dict(enumerate(...)) replaces the redundant identity dict comprehension.
PART_ID_TO_NAME = dict(enumerate(PART_NAMES))