mp-gesture-lib 1.0.0 (tar.gz)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mp_gesture_lib-1.0.0/LICENSE.txt +21 -0
- mp_gesture_lib-1.0.0/PKG-INFO +101 -0
- mp_gesture_lib-1.0.0/README.md +73 -0
- mp_gesture_lib-1.0.0/mp_gesture_lib/__init__.py +23 -0
- mp_gesture_lib-1.0.0/mp_gesture_lib/detector.py +473 -0
- mp_gesture_lib-1.0.0/mp_gesture_lib/models/__init__.py +1 -0
- mp_gesture_lib-1.0.0/mp_gesture_lib/models/operations.task +0 -0
- mp_gesture_lib-1.0.0/mp_gesture_lib/registry.py +70 -0
- mp_gesture_lib-1.0.0/mp_gesture_lib.egg-info/PKG-INFO +101 -0
- mp_gesture_lib-1.0.0/mp_gesture_lib.egg-info/SOURCES.txt +13 -0
- mp_gesture_lib-1.0.0/mp_gesture_lib.egg-info/dependency_links.txt +1 -0
- mp_gesture_lib-1.0.0/mp_gesture_lib.egg-info/requires.txt +3 -0
- mp_gesture_lib-1.0.0/mp_gesture_lib.egg-info/top_level.txt +1 -0
- mp_gesture_lib-1.0.0/pyproject.toml +44 -0
- mp_gesture_lib-1.0.0/setup.cfg +4 -0

mp_gesture_lib-1.0.0/LICENSE.txt
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2026 Debabrata Saha

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

mp_gesture_lib-1.0.0/PKG-INFO
@@ -0,0 +1,101 @@
Metadata-Version: 2.4
Name: mp-gesture-lib
Version: 1.0.0
Summary: Plug-and-play hand gesture recognition module built on MediaPipe and OpenCV
License: MIT
Project-URL: Homepage, https://debabratasaha-dev.github.io/mp-gesture-lib-package
Project-URL: Repository, https://github.com/debabratasaha-dev/mp-gesture-lib-package
Project-URL: Bug Tracker, https://github.com/debabratasaha-dev/mp-gesture-lib-package/issues
Keywords: mediapipe,gesture,hand,recognition,computer-vision,opencv
Classifier: Development Status :: 4 - Beta
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: MIT License
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
Classifier: Topic :: Scientific/Engineering :: Image Recognition
Requires-Python: <3.13,>=3.8
Description-Content-Type: text/markdown
License-File: LICENSE.txt
Requires-Dist: mediapipe>=0.10.0
Requires-Dist: opencv-python>=4.5.0
Requires-Dist: numpy>=1.21.0
Dynamic: license-file

# MP-Gesture-Lib

A plug-and-play **hand gesture recognition module** built on MediaPipe and OpenCV.
Detect gesture names + confidence from a webcam frame in **one function call**.

📖 **[Full Documentation →](https://debabratasaha-dev.github.io/mp-gesture-lib-package)**

---

## Install

```bash
pip install mp-gesture-lib
```

> Requires Python >= 3.8, < 3.13. Bundled model included — no external files needed. (Check `mediapipe` support for your Python version.)

---

## Quick Start

```python
import cv2
from mp_gesture_lib import GestureDetector

detector = GestureDetector()  # zero-config

cap = cv2.VideoCapture(0)
while cap.isOpened():
    ok, frame = cap.read()
    result = detector.detect(frame)

    print(result.gesture)      # "plus", "3", "minus", "unknown" …
    print(result.confidence)   # 0.0 – 1.0

    cv2.imshow("Gestures", result.annotated_frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
```

---

## Supported Gestures

| Gesture | Returns |
|---------|---------|
| Numbers 1 – 10 | `"1"` – `"10"` |
| Arithmetic ops | `"plus"` `"minus"` `"multiply"` `"divide"` |
| Calculator | `"equal"` `"clear"` `"0"` |
| Nothing / unrecognised | `"unknown"` |

---

## Custom Model

```python
# Your model is checked first — the bundled model is used as a fallback
detector = GestureDetector(model_path="my_gestures.task")
```

---

## Acknowledgements

- [MediaPipe](https://ai.google.dev/edge/mediapipe/solutions/guide) — hand tracking & gesture recognition
- [OpenCV](https://opencv.org/) — image & video processing

---

## License

MIT © [Debabrata Saha](https://github.com/debabratasaha-dev)
See [LICENSE](./LICENSE.txt) for details.
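
Note: the metadata above pins the runtime requirements (Python >= 3.8, < 3.13; mediapipe, opencv-python, numpy). A quick way to confirm what pip actually installed is to query the distribution metadata with the standard library; a minimal sketch, assuming `pip install mp-gesture-lib` has already been run in the current environment:

```python
# Sketch: inspect the installed distribution's metadata (standard library only).
from importlib.metadata import metadata, requires, version

print(version("mp-gesture-lib"))                       # "1.0.0"
print(metadata("mp-gesture-lib")["Requires-Python"])   # "<3.13,>=3.8"
for req in requires("mp-gesture-lib") or []:
    print(req)   # mediapipe>=0.10.0, opencv-python>=4.5.0, numpy>=1.21.0
```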

mp_gesture_lib-1.0.0/README.md
@@ -0,0 +1,73 @@
# MP-Gesture-Lib

A plug-and-play **hand gesture recognition module** built on MediaPipe and OpenCV.
Detect gesture names + confidence from a webcam frame in **one function call**.

📖 **[Full Documentation →](https://debabratasaha-dev.github.io/mp-gesture-lib-package)**

---

## Install

```bash
pip install mp-gesture-lib
```

> Requires Python >= 3.8, < 3.13. Bundled model included — no external files needed. (Check `mediapipe` support for your Python version.)

---

## Quick Start

```python
import cv2
from mp_gesture_lib import GestureDetector

detector = GestureDetector()  # zero-config

cap = cv2.VideoCapture(0)
while cap.isOpened():
    ok, frame = cap.read()
    result = detector.detect(frame)

    print(result.gesture)      # "plus", "3", "minus", "unknown" …
    print(result.confidence)   # 0.0 – 1.0

    cv2.imshow("Gestures", result.annotated_frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
```

---

## Supported Gestures

| Gesture | Returns |
|---------|---------|
| Numbers 1 – 10 | `"1"` – `"10"` |
| Arithmetic ops | `"plus"` `"minus"` `"multiply"` `"divide"` |
| Calculator | `"equal"` `"clear"` `"0"` |
| Nothing / unrecognised | `"unknown"` |

---

## Custom Model

```python
# Your model is checked first — the bundled model is used as a fallback
detector = GestureDetector(model_path="my_gestures.task")
```

---

## Acknowledgements

- [MediaPipe](https://ai.google.dev/edge/mediapipe/solutions/guide) — hand tracking & gesture recognition
- [OpenCV](https://opencv.org/) — image & video processing

---

## License

MIT © [Debabrata Saha](https://github.com/debabratasaha-dev)
See [LICENSE](./LICENSE.txt) for details.
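
Note: the Quick Start loop in the README keeps the happy path short. In particular it never checks the `ok` flag from `cap.read()` (a failed read would hand `None` to `detect()`, which raises `ValueError`) and never releases the capture. A slightly more defensive variant of the same loop, a sketch that only uses the API documented above:

```python
import cv2
from mp_gesture_lib import GestureDetector

detector = GestureDetector()

cap = cv2.VideoCapture(0)
try:
    while cap.isOpened():
        ok, frame = cap.read()
        if not ok:            # camera unplugged or stream ended
            break

        result = detector.detect(frame)
        if result.gesture != "unknown":
            print(result.gesture, f"{result.confidence:.2f}")

        # annotated_frame is None when the detector was built with draw_landmarks=False
        if result.annotated_frame is not None:
            cv2.imshow("Gestures", result.annotated_frame)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
finally:
    cap.release()
    cv2.destroyAllWindows()
```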

mp_gesture_lib-1.0.0/mp_gesture_lib/__init__.py
@@ -0,0 +1,23 @@
"""
mp_gesture_lib
==============
A plug-and-play gesture recognition module built on MediaPipe and OpenCV.

Quick-start
-----------
from mp_gesture_lib import GestureDetector

# Zero-config: bundled model loads automatically
detector = GestureDetector()

# Or supply your own custom model (checked first, bundled used as fallback)
detector = GestureDetector(model_path="my_gestures.task")

result = detector.detect(frame)           # pass a BGR numpy frame
print(result.gesture, result.confidence)  # e.g. "plus" 0.93
"""

from .detector import GestureDetector, GestureResult

__all__ = ["GestureDetector", "GestureResult"]
__version__ = "1.0.0"
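
Note: the package exports exactly two names plus `__version__`. `GestureResult` is a plain dataclass (defined in detector.py below), so it can be constructed directly, for example in tests, and its `__str__` formats the confidence as a percentage; a small sketch:

```python
from mp_gesture_lib import GestureResult

# Construct a result by hand; annotated_frame defaults to None.
fake = GestureResult(gesture="plus", confidence=0.93, raw_label="ml:plus")
print(fake)            # GestureResult(gesture='plus', confidence=93.00%)
print(fake.raw_label)  # "ml:plus", the pre-mapping label, useful for debugging
```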

mp_gesture_lib-1.0.0/mp_gesture_lib/detector.py
@@ -0,0 +1,473 @@
"""
detector.py – Core gesture detection engine
==============================================
Wraps MediaPipe GestureRecognizer + rule-based landmark geometry checks
into a single, reusable class.

Detection pipeline (in order — breaks at first match)
------------------------------------------------------
1. User model     – custom .task file supplied by caller (if any)
2. Bundled models – all .task files in mp_gesture_lib/models/ (auto-discovered)
3. Rule-based     – two-hand geometry (plus / multiply)
4. Finger count   – counts extended fingers → "1" – "10"
5. "unknown"      – nothing matched

Threshold
---------
Default 0.70 (70 %). Each ML stage accepts the first gesture whose score
≥ threshold and immediately returns without falling through to the next stage.

Return value
------------
Every public method returns a ``GestureResult`` dataclass:
    gesture    : str   – gesture name, or "unknown" if nothing matched
    confidence : float – probability 0.0 – 1.0
                         Rule-based / finger-count → 1.0
                         ML detections             → model score
                         "unknown"                  → 0.0
    raw_label  : str   – internal label before any mapping (debug)
    annotated_frame : np.ndarray | None – BGR frame with skeleton drawn
"""

from __future__ import annotations

import os
import warnings
from dataclasses import dataclass, field
from typing import Optional

import cv2
import mediapipe as mp
import numpy as np
from mediapipe import solutions
from mediapipe.framework.formats import landmark_pb2
from mediapipe.tasks import python as mp_python
from mediapipe.tasks.python import vision

from .registry import get_bundled_model_paths


# ---------------------------------------------------------------------------
# Public result type
# ---------------------------------------------------------------------------

@dataclass
class GestureResult:
    """
    Returned by every detection call.

    Attributes
    ----------
    gesture : str
        Human-readable gesture name.
        Examples: "plus", "minus", "multiply", "divide", "equal",
        "clear", "0"–"10", or any label from a custom model, or "unknown".
    confidence : float
        Probability estimate in [0.0, 1.0].
        Rule-based detections return 1.0.
        ML detections return the model score.
        "unknown" returns 0.0.
    raw_label : str
        Internal label before any mapping (useful for debugging).
    annotated_frame : Optional[np.ndarray]
        BGR frame with hand landmarks drawn, or None if draw_landmarks=False.
    """
    gesture: str
    confidence: float
    raw_label: str
    annotated_frame: Optional[np.ndarray] = field(default=None, repr=False)

    def __str__(self) -> str:
        return (
            f"GestureResult(gesture={self.gesture!r}, "
            f"confidence={self.confidence:.2%})"
        )


# ---------------------------------------------------------------------------
# Constants
# ---------------------------------------------------------------------------

# MediaPipe landmark indices for finger tips
_FINGER_TIP_IDS = [4, 8, 12, 16, 20]

# Default ML confidence threshold (70 %)
_DEFAULT_ML_THRESHOLD = 0.70

# MediaPipe returns these labels when no real gesture is matched.
# Must be filtered out — they are NOT gesture detections.
_MEDIAPIPE_NON_GESTURE_LABELS = {"", "None", "none", "Unknown", "unknown"}


# ---------------------------------------------------------------------------
# Internal landmark helpers
# ---------------------------------------------------------------------------

def _is_finger_extended(landmarks, finger_tip_idx: int) -> bool:
    """Return True when a non-thumb finger is extended (tip above PIP joint)."""
    return landmarks[finger_tip_idx].y < landmarks[finger_tip_idx - 2].y


def _is_thumb_extended(landmarks, hand_type: str, thresh: float = 0.035) -> bool:
    """Return True when thumb is extended (logic differs for left vs right hand)."""
    thumb_tip = landmarks[4]
    index_tip = landmarks[8]
    if hand_type == "Right":
        return (thumb_tip.x - index_tip.x) > thresh
    else:
        return (index_tip.x - thumb_tip.x) > thresh


def _count_extended_fingers(landmarks, hand_type: str) -> int:
    """Count extended fingers on a single hand (0 – 5)."""
    count = 0
    if _is_thumb_extended(landmarks, hand_type):
        count += 1
    for tip_id in _FINGER_TIP_IDS[1:]:
        if _is_finger_extended(landmarks, tip_id):
            count += 1
    return count


def _draw_landmarks(rgb_image: np.ndarray, result) -> np.ndarray:
    """Draw hand landmarks on *rgb_image* and return annotated copy (RGB)."""
    annotated = np.copy(rgb_image)
    for hand_landmarks in result.hand_landmarks:
        proto = landmark_pb2.NormalizedLandmarkList()
        proto.landmark.extend([
            landmark_pb2.NormalizedLandmark(x=lm.x, y=lm.y, z=lm.z)
            for lm in hand_landmarks
        ])
        solutions.drawing_utils.draw_landmarks(
            annotated,
            proto,
            solutions.hands.HAND_CONNECTIONS,
            solutions.drawing_styles.get_default_hand_landmarks_style(),
            solutions.drawing_styles.get_default_hand_connections_style(),
        )
    return annotated


def _build_recognizer(
    model_path: str, num_hands: int
) -> vision.GestureRecognizer:
    """Create a MediaPipe GestureRecognizer from a .task file path."""
    base_options = mp_python.BaseOptions(model_asset_path=model_path)
    options = vision.GestureRecognizerOptions(
        base_options=base_options,
        num_hands=num_hands,
    )
    return vision.GestureRecognizer.create_from_options(options)


def _run_recognizer(
    recognizer: vision.GestureRecognizer,
    mp_img: mp.Image,
    threshold: float,
) -> Optional[tuple[str, float, str]]:
    """
    Run *recognizer* on *mp_img*.

    Returns (gesture_name, confidence, raw_label) for the first gesture
    whose score ≥ threshold AND whose label is a real gesture (not MediaPipe's
    internal 'None' / background class), or None if nothing qualifies.
    """
    result = recognizer.recognize(mp_img)
    if not result.hand_landmarks:
        return None
    for idx in range(len(result.hand_landmarks)):
        top = result.gestures[idx][0]
        label = top.category_name
        # Skip MediaPipe's 'no gesture' / background pseudo-labels
        if label in _MEDIAPIPE_NON_GESTURE_LABELS:
            continue
        if top.score >= threshold:
            return (label, float(top.score), f"ml:{label}")
    return None


# ---------------------------------------------------------------------------
# Main detector class
# ---------------------------------------------------------------------------

class GestureDetector:
    """
    Recognise hand gestures from a single BGR/RGB video frame.

    Parameters
    ----------
    model_path : str | None
        Path to a custom ``.task`` MediaPipe model.
        Pass ``None`` (default) to use only the bundled model(s).
        When provided, this model is queried **first**; if it returns a
        confident result the pipeline stops there.
    num_hands : int
        Maximum number of hands to track simultaneously (default 2).
    ml_threshold : float
        Minimum confidence score [0, 1] for accepting an ML prediction
        (default 0.70). Applied at every ML stage.
    draw_landmarks : bool
        When True, ``GestureResult.annotated_frame`` contains the input
        frame with hand skeleton drawn. Set False for performance paths.

    Detection order
    ---------------
    1. Custom user model (if ``model_path`` supplied)
    2. All bundled models (``mp_gesture_lib/models/*.task``)
    3. Rule-based two-hand geometry (plus / multiply)
    4. Finger count (numbers 1 – 10)
    5. "unknown"

    Usage
    -----
    ::

        # Zero-config — bundled model loads automatically
        detector = GestureDetector()

        # Custom model — checked first, bundled used as fallback
        detector = GestureDetector(model_path="my_gestures.task")

        cap = cv2.VideoCapture(0)
        while cap.isOpened():
            ok, frame = cap.read()
            result = detector.detect(frame)
            print(result.gesture, result.confidence)
    """

    def __init__(
        self,
        model_path: Optional[str] = None,
        num_hands: int = 2,
        ml_threshold: float = _DEFAULT_ML_THRESHOLD,
        draw_landmarks: bool = True,
    ) -> None:
        self._ml_threshold = ml_threshold
        self._draw_landmarks = draw_landmarks
        self._num_hands = num_hands

        # Stage 1 – user-supplied custom model (optional)
        self._user_recognizer: Optional[vision.GestureRecognizer] = None
        if model_path is not None:
            model_path = os.path.abspath(model_path)
            if not os.path.exists(model_path):
                raise FileNotFoundError(
                    f"Custom model not found: {model_path}"
                )
            self._user_recognizer = _build_recognizer(model_path, num_hands)

        # Stage 2 – bundled models (auto-discovered from mp_gesture_lib/models/)
        self._bundled_recognizers: list[vision.GestureRecognizer] = []
        bundled_paths = get_bundled_model_paths()
        if not bundled_paths:
            warnings.warn(
                "mp_gesture_lib: no bundled models found in mp_gesture_lib/models/. "
                "Only custom model (if provided) and rule-based detection will work.",
                RuntimeWarning,
                stacklevel=2,
            )
        for path in bundled_paths:
            self._bundled_recognizers.append(
                _build_recognizer(path, num_hands)
            )

    # ------------------------------------------------------------------
    # Public API
    # ------------------------------------------------------------------

    def detect(self, frame: np.ndarray, input_is_rgb: bool = False) -> GestureResult:
        """
        Detect the dominant hand gesture in *frame*.

        Parameters
        ----------
        frame : np.ndarray
            A single video frame. BGR by default (as from ``cv2.VideoCapture``).
            Pass ``input_is_rgb=True`` for RGB arrays.
        input_is_rgb : bool
            Set True if *frame* is already in RGB format.

        Returns
        -------
        GestureResult
            Dataclass with ``gesture``, ``confidence``, ``raw_label``,
            and optionally ``annotated_frame``.
        """
        if frame is None or frame.size == 0:
            raise ValueError("Received an empty frame.")

        # Normalise to RGB for MediaPipe
        rgb = frame if input_is_rgb else cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        mp_img = mp.Image(image_format=mp.ImageFormat.SRGB, data=rgb)

        # We need at least one recognizer run to get hand landmarks for drawing.
        # Use user recognizer if present, else first bundled recognizer.
        primary_recognizer = (
            self._user_recognizer
            or (self._bundled_recognizers[0] if self._bundled_recognizers else None)
        )

        # Landmarks are drawn from primary recognition result
        annotated: Optional[np.ndarray] = None
        primary_recognition = None
        if primary_recognizer is not None:
            primary_recognition = primary_recognizer.recognize(mp_img)
            if self._draw_landmarks:
                drawn = _draw_landmarks(mp_img.numpy_view(), primary_recognition)
                annotated = cv2.cvtColor(cv2.flip(drawn, 1), cv2.COLOR_RGB2BGR)

            if not primary_recognition.hand_landmarks:
                return GestureResult(
                    gesture="unknown",
                    confidence=0.0,
                    raw_label="no_hand",
                    annotated_frame=annotated,
                )
        else:
            # No recognizers at all. The rule-based and finger-count stages
            # need landmark data, which only a recognizer run can provide,
            # so warn and return "unknown".
            warnings.warn(
                "No recognizers available. Install bundled models or supply model_path.",
                RuntimeWarning,
                stacklevel=2,
            )
            return GestureResult(
                gesture="unknown",
                confidence=0.0,
                raw_label="no_recognizer",
                annotated_frame=annotated,
            )

        return self._classify(mp_img, primary_recognition, annotated)

    # ------------------------------------------------------------------
    # Private classification pipeline
    # ------------------------------------------------------------------

    def _classify(
        self,
        mp_img: mp.Image,
        primary_recognition,
        annotated_frame: Optional[np.ndarray],
    ) -> GestureResult:
        """
        Run the full detection pipeline in priority order.
        Breaks immediately at each stage if a confident result is found.
        """

        # ── Stage 1: User custom model ────────────────────────────────
        if self._user_recognizer is not None:
            hit = _run_recognizer(self._user_recognizer, mp_img, self._ml_threshold)
            if hit is not None:
                gesture, confidence, raw = hit
                return GestureResult(
                    gesture=gesture,
                    confidence=confidence,
                    raw_label=raw,
                    annotated_frame=annotated_frame,
                )

        # ── Stage 2: Bundled models (each in sequence, break on first hit) ──
        for bundled_rec in self._bundled_recognizers:
            hit = _run_recognizer(bundled_rec, mp_img, self._ml_threshold)
            if hit is not None:
                gesture, confidence, raw = hit
                return GestureResult(
                    gesture=gesture,
                    confidence=confidence,
                    raw_label=raw,
                    annotated_frame=annotated_frame,
                )

        # ── Stage 3: Rule-based two-hand gestures ─────────────────────
        rule_result = self._check_two_hand_rules(primary_recognition)
        if rule_result is not None:
            gesture, raw = rule_result
            return GestureResult(
                gesture=gesture,
                confidence=1.0,
                raw_label=raw,
                annotated_frame=annotated_frame,
            )

        # ── Stage 4: Finger count ──────────────────────────────────────
        count_result = self._check_finger_count(primary_recognition)
        if count_result is not None:
            count, raw = count_result
            return GestureResult(
                gesture=str(count),
                confidence=1.0,
                raw_label=raw,
                annotated_frame=annotated_frame,
            )

        # ── Stage 5: Nothing matched ───────────────────────────────────
        return GestureResult(
            gesture="unknown",
            confidence=0.0,
            raw_label="no_match",
            annotated_frame=annotated_frame,
        )

    def _check_two_hand_rules(self, recognition) -> Optional[tuple[str, str]]:
        """
        Rule-based two-hand gesture detection.

        Returns (gesture_name, raw_label) or None.

        plus     – right index-tip significantly above left index-tip;
                   both PIPs horizontally close (< 0.04 normalised units).
        multiply – right index-tip x > left index-tip x AND roughly same y.
        """
        hand_data: dict[str, dict] = {"Right": {}, "Left": {}}

        for hand_landmarks, hand in zip(
            recognition.hand_landmarks, recognition.handedness
        ):
            hand_type = hand[0].category_name
            hand_data[hand_type]["tip"] = hand_landmarks[8]  # index tip
            hand_data[hand_type]["pip"] = hand_landmarks[6]  # index PIP

        right = hand_data["Right"]
        left = hand_data["Left"]

        required = ("tip", "pip")
        if not (all(k in right for k in required) and all(k in left for k in required)):
            return None
        if not all(v is not None for v in (right["tip"], left["tip"],
                                           right["pip"], left["pip"])):
            return None

        # plus: right tip notably above left tip; pips horizontally close
        if (
            (right["tip"].y + 0.052) < left["tip"].y
            and abs(left["pip"].x - right["pip"].x) < 0.04
        ):
            return ("plus", "rule:plus")

        # multiply: right tip to the right of left tip; roughly same height
        if (
            right["tip"].x > left["tip"].x
            and right["tip"].y > (left["tip"].y - 0.01)
        ):
            return ("multiply", "rule:multiply")

        return None

    def _check_finger_count(self, recognition) -> Optional[tuple[int, str]]:
        """
        Count extended fingers across all detected hands (1 – 10).

        Returns (count, raw_label) or None if no fingers are extended.
        """
        total = 0
        for hand_landmarks, hand in zip(
            recognition.hand_landmarks, recognition.handedness
        ):
            hand_type = hand[0].category_name
            total += _count_extended_fingers(hand_landmarks, hand_type)

        if total > 0:
            return (total, f"rule:fingers:{total}")
        return None
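
Note: the finger-count stage is pure landmark geometry. A non-thumb finger counts as extended when its tip landmark sits above its PIP joint (smaller normalised y), and the thumb check compares the x offset between thumb tip and index tip against a 0.035 threshold. The helpers are module-private, but they can be exercised on synthetic landmarks to see the rule in isolation; a sketch that stands in for MediaPipe's NormalizedLandmark objects with SimpleNamespace (internal names, subject to change):

```python
from types import SimpleNamespace

from mp_gesture_lib.detector import _count_extended_fingers


def make_hand(extended_tips=(), thumb_out=False):
    """Build 21 fake landmarks for a right hand, all at y=0.5 by default."""
    lms = [SimpleNamespace(x=0.5, y=0.5, z=0.0) for _ in range(21)]
    for tip in extended_tips:   # e.g. 8 = index tip, 12 = middle tip
        lms[tip].y = 0.3        # tip above (smaller y than) its PIP joint
    if thumb_out:
        lms[4].x = 0.60         # thumb tip pushed right of the index tip
        lms[8].x = 0.50         # 0.60 - 0.50 exceeds the 0.035 threshold
    return lms


print(_count_extended_fingers(make_hand(extended_tips=(8,)), "Right"))         # 1
print(_count_extended_fingers(make_hand(extended_tips=(8, 12, 16)), "Right"))  # 3
print(_count_extended_fingers(make_hand(thumb_out=True), "Right"))             # 1
```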

mp_gesture_lib-1.0.0/mp_gesture_lib/models/__init__.py
@@ -0,0 +1 @@
# Makes mp_gesture_lib/models a sub-package so importlib.resources can locate files here.

mp_gesture_lib-1.0.0/mp_gesture_lib/models/operations.task
Binary file

mp_gesture_lib-1.0.0/mp_gesture_lib/registry.py
@@ -0,0 +1,70 @@
"""
registry.py – Bundled model discovery
=========================================
Scans the ``mp_gesture_lib/models/`` directory for all ``*.task`` files
using ``importlib.resources`` so paths resolve correctly both during
local development AND after a ``pip install``.

Adding a new bundled model
--------------------------
Drop any ``*.task`` file into ``mp_gesture_lib/models/`` — it is picked up
automatically on the next import. No code change required.
"""

from __future__ import annotations

import os
from importlib import resources


def get_bundled_model_paths() -> list[str]:
    """
    Return absolute paths to every ``*.task`` file bundled in
    ``mp_gesture_lib/models/``.

    Returns
    -------
    list[str]
        Sorted list of absolute path strings (sorted for determinism).
        Empty list if the models directory exists but contains no ``.task`` files.
    """
    try:
        models_pkg = resources.files("mp_gesture_lib.models")
        paths: list[str] = []

        for item in models_pkg.iterdir():
            # resources.files() returns Traversable objects; convert to str path
            name = item.name
            if name.endswith(".task"):
                # Write to a temp location only if it's a non-filesystem resource
                # (e.g., inside a zip). For normal installs this is a real file path.
                if hasattr(item, "_path"):
                    # Internal attr on _NamespacePath
                    paths.append(str(item._path))
                else:
                    # Standard approach: resolve to string via __str__ / os.fspath
                    resolved = str(item)
                    if os.path.exists(resolved):
                        paths.append(resolved)
                    else:
                        # Fallback: extract to a temp file (zip-installed packages)
                        import tempfile
                        suffix = f"_{name}"
                        tmp = tempfile.NamedTemporaryFile(
                            suffix=suffix, delete=False
                        )
                        tmp.write(item.read_bytes())
                        tmp.close()
                        paths.append(tmp.name)

        return sorted(paths)

    except (ModuleNotFoundError, TypeError, AttributeError) as exc:
        # If package structure is unexpected, return empty list — caller handles it
        import warnings
        warnings.warn(
            f"mp_gesture_lib: could not discover bundled models: {exc}",
            RuntimeWarning,
            stacklevel=2,
        )
        return []
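
Note: because discovery is just a directory scan, it is easy to check which bundled models an installed copy actually found, and therefore whether the packaged `operations.task` made it into the wheel; a small sketch using the function defined above:

```python
from mp_gesture_lib.registry import get_bundled_model_paths

paths = get_bundled_model_paths()
if not paths:
    print("No bundled .task models discovered; only custom/rule-based stages will run.")
for p in paths:
    print(p)   # e.g. .../site-packages/mp_gesture_lib/models/operations.task
```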

mp_gesture_lib-1.0.0/mp_gesture_lib.egg-info/PKG-INFO
@@ -0,0 +1,101 @@
Metadata-Version: 2.4
Name: mp-gesture-lib
Version: 1.0.0
Summary: Plug-and-play hand gesture recognition module built on MediaPipe and OpenCV
License: MIT
Project-URL: Homepage, https://debabratasaha-dev.github.io/mp-gesture-lib-package
Project-URL: Repository, https://github.com/debabratasaha-dev/mp-gesture-lib-package
Project-URL: Bug Tracker, https://github.com/debabratasaha-dev/mp-gesture-lib-package/issues
Keywords: mediapipe,gesture,hand,recognition,computer-vision,opencv
Classifier: Development Status :: 4 - Beta
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: MIT License
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
Classifier: Topic :: Scientific/Engineering :: Image Recognition
Requires-Python: <3.13,>=3.8
Description-Content-Type: text/markdown
License-File: LICENSE.txt
Requires-Dist: mediapipe>=0.10.0
Requires-Dist: opencv-python>=4.5.0
Requires-Dist: numpy>=1.21.0
Dynamic: license-file

# MP-Gesture-Lib

A plug-and-play **hand gesture recognition module** built on MediaPipe and OpenCV.
Detect gesture names + confidence from a webcam frame in **one function call**.

📖 **[Full Documentation →](https://debabratasaha-dev.github.io/mp-gesture-lib-package)**

---

## Install

```bash
pip install mp-gesture-lib
```

> Requires Python >= 3.8, < 3.13. Bundled model included — no external files needed. (Check `mediapipe` support for your Python version.)

---

## Quick Start

```python
import cv2
from mp_gesture_lib import GestureDetector

detector = GestureDetector()  # zero-config

cap = cv2.VideoCapture(0)
while cap.isOpened():
    ok, frame = cap.read()
    result = detector.detect(frame)

    print(result.gesture)      # "plus", "3", "minus", "unknown" …
    print(result.confidence)   # 0.0 – 1.0

    cv2.imshow("Gestures", result.annotated_frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
```

---

## Supported Gestures

| Gesture | Returns |
|---------|---------|
| Numbers 1 – 10 | `"1"` – `"10"` |
| Arithmetic ops | `"plus"` `"minus"` `"multiply"` `"divide"` |
| Calculator | `"equal"` `"clear"` `"0"` |
| Nothing / unrecognised | `"unknown"` |

---

## Custom Model

```python
# Your model is checked first — the bundled model is used as a fallback
detector = GestureDetector(model_path="my_gestures.task")
```

---

## Acknowledgements

- [MediaPipe](https://ai.google.dev/edge/mediapipe/solutions/guide) — hand tracking & gesture recognition
- [OpenCV](https://opencv.org/) — image & video processing

---

## License

MIT © [Debabrata Saha](https://github.com/debabratasaha-dev)
See [LICENSE](./LICENSE.txt) for details.

mp_gesture_lib-1.0.0/mp_gesture_lib.egg-info/SOURCES.txt
@@ -0,0 +1,13 @@
LICENSE.txt
README.md
pyproject.toml
mp_gesture_lib/__init__.py
mp_gesture_lib/detector.py
mp_gesture_lib/registry.py
mp_gesture_lib.egg-info/PKG-INFO
mp_gesture_lib.egg-info/SOURCES.txt
mp_gesture_lib.egg-info/dependency_links.txt
mp_gesture_lib.egg-info/requires.txt
mp_gesture_lib.egg-info/top_level.txt
mp_gesture_lib/models/__init__.py
mp_gesture_lib/models/operations.task

mp_gesture_lib-1.0.0/mp_gesture_lib.egg-info/dependency_links.txt
@@ -0,0 +1 @@


mp_gesture_lib-1.0.0/mp_gesture_lib.egg-info/top_level.txt
@@ -0,0 +1 @@
mp_gesture_lib

mp_gesture_lib-1.0.0/pyproject.toml
@@ -0,0 +1,44 @@
[build-system]
requires = ["setuptools>=68", "wheel"]
build-backend = "setuptools.build_meta"

[project]
name = "mp-gesture-lib"  # pip install mp-gesture-lib
version = "1.0.0"
description = "Plug-and-play hand gesture recognition module built on MediaPipe and OpenCV"
readme = "README.md"
license = { text = "MIT" }
requires-python = ">=3.8, <3.13"
keywords = [
    "mediapipe",
    "gesture",
    "hand",
    "recognition",
    "computer-vision",
    "opencv",
]
classifiers = [
    "Development Status :: 4 - Beta",
    "Intended Audience :: Developers",
    "License :: OSI Approved :: MIT License",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.8",
    "Programming Language :: Python :: 3.9",
    "Programming Language :: Python :: 3.10",
    "Programming Language :: Python :: 3.11",
    "Programming Language :: Python :: 3.12",
    "Topic :: Scientific/Engineering :: Artificial Intelligence",
    "Topic :: Scientific/Engineering :: Image Recognition",
]
dependencies = ["mediapipe>=0.10.0", "opencv-python>=4.5.0", "numpy>=1.21.0"]

[project.urls]
Homepage = "https://debabratasaha-dev.github.io/mp-gesture-lib-package"
Repository = "https://github.com/debabratasaha-dev/mp-gesture-lib-package"
"Bug Tracker" = "https://github.com/debabratasaha-dev/mp-gesture-lib-package/issues"

[tool.setuptools.packages.find]
include = ["mp_gesture_lib*"]  # include mp_gesture_lib and all sub-packages

[tool.setuptools.package-data]
"mp_gesture_lib.models" = ["*.task"]  # bundle the .task model files
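
Note: the `[tool.setuptools.package-data]` entry is what ships `operations.task` inside the wheel; without it the model would be left out and `GestureDetector()` would fall back to rule-based detection with a RuntimeWarning. One way to sanity-check an installed build is to resolve the resource the same way registry.py does; a sketch, assuming the bundled model keeps the name `operations.task`:

```python
from importlib import resources

# Locate the bundled model via importlib.resources, mirroring registry.py.
# (resources.files needs Python 3.9+; registry.py catches the AttributeError on 3.8.)
model = resources.files("mp_gesture_lib.models").joinpath("operations.task")
print(model.is_file())   # True if the .task file was packaged and installed
```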