lipreader 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 Parham Fakhari
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,3 @@
1
+ include README.md
2
+ include LICENSE
3
+ include requirements.txt
@@ -0,0 +1,151 @@
1
+ Metadata-Version: 2.4
2
+ Name: lipreader
3
+ Version: 0.1.0
4
+ Summary: A CPU-only lip reading toolkit for command recognition from video
5
+ Home-page: https://github.com/parhamfakhar1/lipreader
6
+ Author: Parham Fakhari
7
+ Author-email: parhamfakhari.nab2020@gmail.com
8
+ License: MIT
9
+ Project-URL: Bug Tracker, https://github.com/parhamfakhar1/lipreader/issues
10
+ Project-URL: Source Code, https://github.com/parhamfakhar1/lipreader
11
+ Classifier: Development Status :: 4 - Beta
12
+ Classifier: Intended Audience :: Developers
13
+ Classifier: Intended Audience :: Science/Research
14
+ Classifier: License :: OSI Approved :: MIT License
15
+ Classifier: Operating System :: OS Independent
16
+ Classifier: Programming Language :: Python :: 3
17
+ Classifier: Programming Language :: Python :: 3.7
18
+ Classifier: Programming Language :: Python :: 3.8
19
+ Classifier: Programming Language :: Python :: 3.9
20
+ Classifier: Programming Language :: Python :: 3.10
21
+ Classifier: Programming Language :: Python :: 3.11
22
+ Classifier: Topic :: Multimedia :: Video
23
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
24
+ Requires-Python: >=3.7
25
+ Description-Content-Type: text/markdown
26
+ License-File: LICENSE
27
+ Dynamic: author
28
+ Dynamic: author-email
29
+ Dynamic: classifier
30
+ Dynamic: description
31
+ Dynamic: description-content-type
32
+ Dynamic: home-page
33
+ Dynamic: license
34
+ Dynamic: license-file
35
+ Dynamic: project-url
36
+ Dynamic: requires-python
37
+ Dynamic: summary
38
+
39
+
40
+ # LipReader
41
+
42
+ A lightweight, CPU-only lip reading toolkit for command recognition from video.
43
+ No GPU required — runs efficiently on Intel i5 and similar systems.
44
+
45
+ ## ✨ Features
46
+
47
+ - **CPU-only**: No GPU or deep learning dependencies.
48
+ - **CLI & API**: Use via command line or import as a Python library.
49
+ - **Trainable**: Learn custom lip motion patterns from your own videos.
50
+ - **JSON-based**: All data stored in human-readable JSON format.
51
+ - **Real-time ready**: Optimized for low-latency inference.
52
+
53
+ ## 📦 Installation
54
+
55
+ Install in development mode (recommended):
56
+
57
+ ```bash
58
+ git clone https://github.com/Parhamfakhar1/lipreader.git
59
+ cd lipreader
60
+ pip install -e .
61
+ ```
62
+
63
+ > Requires: Python 3.7+, OpenCV, NumPy
64
+
65
+ ## 🚀 Usage
66
+
67
+ ### Train a new command
68
+
69
+ ```bash
70
+ lipreader train --video start.mp4 --word start
71
+ ```
72
+
73
+ You can train the same word multiple times with different videos:
74
+
75
+ ```bash
76
+ lipreader train -v start1.mp4 -w start
77
+ lipreader train -v start2.mp4 -w start
78
+ ```
79
+
80
+ ### Predict from a video
81
+
82
+ ```bash
83
+ lipreader predict --video test.mp4
84
+ ```
85
+
86
+ **Sample output:**
87
+ ```
88
+ 🎯 Prediction: start
89
+
90
+ 📈 Probabilities:
91
+ start: 86.3%
92
+ stop: 13.7%
93
+ ```
94
+
95
+ ### CLI Options
96
+
97
+ | Flag | Description |
98
+ |------|-------------|
99
+ | `-v`, `--video` | Path to input video (MP4, AVI, etc.) |
100
+ | `-w`, `--word` | Label for training (e.g., "start", "stop") |
101
+ | `-d`, `--data` | Path to JSON data file (default: `lip_data.json`) |
102
+
103
+ ## 💻 Python API
104
+
105
+ Use `LipReader` directly in your code:
106
+
107
+ ```python
108
+ from lipreader import LipReader
109
+
110
+ # Initialize
111
+ reader = LipReader("commands.json")
112
+
113
+ # Train
114
+ reader.train("start.mp4", "start")
115
+
116
+ # Predict
117
+ predicted_word, probabilities = reader.predict("unknown.mp4")
118
+ print(f"Detected: {predicted_word}")
119
+ ```
120
+
121
+ ## 🗃️ Data Format
122
+
123
+ All trained patterns are saved in `lip_data.json`:
124
+
125
+ ```json
126
+ {
127
+ "start": {
128
+ "samples": [
129
+ {
130
+ "avg_ratio": 1.28,
131
+ "ratio_std": 0.25,
132
+ "min_ratio": 0.78,
133
+ "max_ratio": 1.88,
134
+ "frame_count": 120,
135
+ "video": "start1.mp4"
136
+ }
137
+ ]
138
+ }
139
+ }
140
+ ```
141
+
142
+ ## ⚠️ Limitations
143
+
144
+ - Works best in **good lighting** with **front-facing video**.
145
+ - Accuracy depends on **clear lip motion** (silent articulation works).
146
+ - Not designed for full-sentence lip reading — optimized for **short commands**.
147
+
148
+ ## 📄 License
149
+
150
+ MIT License
151
+
@@ -0,0 +1,113 @@
1
+
2
+ # LipReader
3
+
4
+ A lightweight, CPU-only lip reading toolkit for command recognition from video.
5
+ No GPU required — runs efficiently on Intel i5 and similar systems.
6
+
7
+ ## ✨ Features
8
+
9
+ - **CPU-only**: No GPU or deep learning dependencies.
10
+ - **CLI & API**: Use via command line or import as a Python library.
11
+ - **Trainable**: Learn custom lip motion patterns from your own videos.
12
+ - **JSON-based**: All data stored in human-readable JSON format.
13
+ - **Real-time ready**: Optimized for low-latency inference.
14
+
15
+ ## 📦 Installation
16
+
17
+ Install in development mode (recommended):
18
+
19
+ ```bash
20
+ git clone https://github.com/Parhamfakhar1/lipreader.git
21
+ cd lipreader
22
+ pip install -e .
23
+ ```
24
+
25
+ > Requires: Python 3.7+, OpenCV, NumPy
26
+
27
+ ## 🚀 Usage
28
+
29
+ ### Train a new command
30
+
31
+ ```bash
32
+ lipreader train --video start.mp4 --word start
33
+ ```
34
+
35
+ You can train the same word multiple times with different videos:
36
+
37
+ ```bash
38
+ lipreader train -v start1.mp4 -w start
39
+ lipreader train -v start2.mp4 -w start
40
+ ```
41
+
42
+ ### Predict from a video
43
+
44
+ ```bash
45
+ lipreader predict --video test.mp4
46
+ ```
47
+
48
+ **Sample output:**
49
+ ```
50
+ 🎯 Prediction: start
51
+
52
+ 📈 Probabilities:
53
+ start: 86.3%
54
+ stop: 13.7%
55
+ ```
56
+
57
+ ### CLI Options
58
+
59
+ | Flag | Description |
60
+ |------|-------------|
61
+ | `-v`, `--video` | Path to input video (MP4, AVI, etc.) |
62
+ | `-w`, `--word` | Label for training (e.g., "start", "stop") |
63
+ | `-d`, `--data` | Path to JSON data file (default: `lip_data.json`) |
64
+
65
+ ## 💻 Python API
66
+
67
+ Use `LipReader` directly in your code:
68
+
69
+ ```python
70
+ from lipreader import LipReader
71
+
72
+ # Initialize
73
+ reader = LipReader("commands.json")
74
+
75
+ # Train
76
+ reader.train("start.mp4", "start")
77
+
78
+ # Predict
79
+ predicted_word, probabilities = reader.predict("unknown.mp4")
80
+ print(f"Detected: {predicted_word}")
81
+ ```
82
+
83
+ ## 🗃️ Data Format
84
+
85
+ All trained patterns are saved in `lip_data.json`:
86
+
87
+ ```json
88
+ {
89
+ "start": {
90
+ "samples": [
91
+ {
92
+ "avg_ratio": 1.28,
93
+ "ratio_std": 0.25,
94
+ "min_ratio": 0.78,
95
+ "max_ratio": 1.88,
96
+ "frame_count": 120,
97
+ "video": "start1.mp4"
98
+ }
99
+ ]
100
+ }
101
+ }
102
+ ```
103
+
104
+ ## ⚠️ Limitations
105
+
106
+ - Works best in **good lighting** with **front-facing video**.
107
+ - Accuracy depends on **clear lip motion** (silent articulation works).
108
+ - Not designed for full-sentence lip reading — optimized for **short commands**.
109
+
110
+ ## 📄 License
111
+
112
+ MIT License
113
+ ```
@@ -0,0 +1,3 @@
1
"""lipreader — CPU-only lip reading toolkit.

Re-exports :class:`LipReader` so callers can write
``from lipreader import LipReader``.
"""
from .core import LipReader

# NOTE: keep in sync with the version declared in setup.py.
__version__ = "0.1.0"
@@ -0,0 +1,45 @@
1
+ import argparse
2
+ import sys
3
+ from .core import LipReader
4
+
5
def _build_parser():
    """Build the top-level parser with `train` and `predict` subcommands."""
    parser = argparse.ArgumentParser(
        prog="lipreader",
        description="Lip Reading CLI — Train and predict lip motion patterns.",
    )
    sub = parser.add_subparsers(dest="command", required=True)

    train = sub.add_parser("train", help="Train a word from video")
    train.add_argument("--video", "-v", required=True, help="Input video path")
    train.add_argument("--word", "-w", required=True, help="Target word/label")
    train.add_argument("--data", "-d", default="lip_data.json", help="Data file")

    predict = sub.add_parser("predict", help="Predict word from video")
    predict.add_argument("--video", "-v", required=True, help="Test video path")
    predict.add_argument("--data", "-d", default="lip_data.json", help="Data file")

    return parser


def main():
    """Console entry point: parse argv, then run `train` or `predict`.

    Exits with status 1 when prediction finds no match or on any error
    raised while processing (bad video path, unreadable data file, ...).
    """
    args = _build_parser().parse_args()

    try:
        reader = LipReader(args.data)

        if args.command == "train":
            stats = reader.train(args.video, args.word)
            print(f"✅ Trained word '{args.word}'")
            print(f"   Avg Ratio: {stats['avg_ratio']:.2f} ± {stats['ratio_std']:.2f}")
        elif args.command == "predict":
            prediction, probabilities = reader.predict(args.video)
            if prediction is None:
                print("⚠️ No match found.")
                sys.exit(1)
            print(f"🎯 Prediction: {prediction}")
            print("\n📈 Probabilities:")
            # Highest-probability word first; stable sort keeps ties in
            # insertion order, same as the original key-based sort.
            for label, pct in sorted(probabilities.items(), key=lambda kv: kv[1], reverse=True):
                print(f"   {label}: {pct:.1f}%")
    except Exception as exc:
        # Single error boundary: report on stderr and exit non-zero so
        # shells and scripts can detect failure.
        print(f"❌ Error: {exc}", file=sys.stderr)
        sys.exit(1)
@@ -0,0 +1,104 @@
1
+ import cv2
2
+ import numpy as np
3
+ import json
4
+ import os
5
+
6
class LipReader:
    """Heuristic, CPU-only lip-motion recognizer.

    Pipeline: detect the largest face per frame with an OpenCV Haar
    cascade, threshold the face region to isolate the darkest blob
    (presumed to be the lips), reduce it to a width/height ratio, and
    compare per-video ratio statistics against trained samples stored
    in a JSON file at ``data_path``.
    """

    def __init__(self, data_path="lip_data.json"):
        # JSON store for trained word samples (created on first train()).
        self.data_path = data_path
        # Frontal-face detector shipped with the OpenCV distribution.
        self.face_cascade = cv2.CascadeClassifier(
            cv2.data.haarcascades + 'haarcascade_frontalface_default.xml'
        )
        # empty() means the XML failed to load — fail fast rather than
        # silently detecting nothing later.
        if self.face_cascade.empty():
            raise RuntimeError("Failed to load Haar Cascade classifier.")

    def extract_lip_ratio(self, frame, x, y, w, h):
        """Return width/height of the largest dark blob in the face box.

        Returns None when the ROI is empty, no contour is found, or the
        largest contour is too small to be meaningful.

        NOTE(review): the threshold runs over the *whole* face rectangle,
        not a mouth sub-region, so the largest dark blob is assumed to be
        the lips — confirm this holds outside the good-lighting,
        front-facing setup described in the README.
        """
        face_roi = frame[y:y + h, x:x + w]
        if face_roi.size == 0:
            return None
        gray = cv2.cvtColor(face_roi, cv2.COLOR_BGR2GRAY)
        # Inverse binary threshold: dark pixels (lips/shadow) -> foreground.
        _, thresh = cv2.threshold(gray, 60, 255, cv2.THRESH_BINARY_INV)
        kernel = np.ones((3, 3), np.uint8)
        # Close small holes inside the blob, then open to drop speckle noise.
        thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
        thresh = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
        contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        if contours:
            largest = max(contours, key=cv2.contourArea)
            # Skip tiny contours; 100 px² looks like an empirical noise
            # floor — TODO confirm against training data.
            if cv2.contourArea(largest) > 100:
                x_l, y_l, w_l, h_l = cv2.boundingRect(largest)
                # Epsilon guards against division by zero for degenerate rects.
                return w_l / (h_l + 1e-6)
        return None

    def process_video(self, video_path):
        """Scan a video and return summary statistics of the lip ratios.

        Raises FileNotFoundError if the video cannot be opened, and
        ValueError if no frame yielded a usable lip ratio.
        """
        cap = cv2.VideoCapture(video_path)
        if not cap.isOpened():
            raise FileNotFoundError(f"Cannot open video: {video_path}")
        ratios = []
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            faces = self.face_cascade.detectMultiScale(gray, 1.3, 5)
            if len(faces) > 0:
                # Use the largest detected face (by rectangle area).
                x, y, w, h = max(faces, key=lambda r: r[2] * r[3])
                ratio = self.extract_lip_ratio(frame, x, y, w, h)
                if ratio is not None:
                    ratios.append(ratio)
        cap.release()
        if not ratios:
            raise ValueError("No lip region detected in video.")
        # Plain-float stats so the dict serializes cleanly to JSON.
        return {
            "avg_ratio": float(np.mean(ratios)),
            "ratio_std": float(np.std(ratios)),
            "min_ratio": float(np.min(ratios)),
            "max_ratio": float(np.max(ratios)),
            "frame_count": len(ratios)
        }

    def load_data(self):
        """Load the trained-sample store; empty dict when none exists yet."""
        if os.path.exists(self.data_path):
            with open(self.data_path, "r", encoding="utf-8") as f:
                return json.load(f)
        return {}

    def save_data(self, data):
        """Write the trained-sample store as pretty-printed UTF-8 JSON."""
        with open(self.data_path, "w", encoding="utf-8") as f:
            json.dump(data, f, ensure_ascii=False, indent=2)

    def train(self, video_path, word):
        """Add one sample for ``word`` from ``video_path``; return its stats."""
        stats = self.process_video(video_path)
        # Record only the basename so the store is portable across machines.
        stats["video"] = os.path.basename(video_path)
        data = self.load_data()
        if word not in data:
            data[word] = {"samples": [stats]}
        else:
            data[word]["samples"].append(stats)
        self.save_data(data)
        return stats

    def predict(self, video_path):
        """Return ``(best_word, {word: probability_pct})`` for a video.

        Raises ValueError when no words have been trained. Returns
        ``(None, {})`` when every trained word scores zero similarity.
        """
        test_stats = self.process_video(video_path)
        data = self.load_data()
        if not data:
            raise ValueError("No trained words found. Train first with `.train()`.")

        def similarity(test_avg, test_std, sample):
            # Linear penalty on mean-ratio distance (half weight) and
            # std distance (full weight), clamped at zero.
            dist = abs(test_avg - sample["avg_ratio"])
            std_diff = abs(test_std - sample["ratio_std"])
            return max(0, 1.0 - (dist / 2.0) - (std_diff / 1.0))

        # Score each word as the mean similarity over all its samples.
        scores = {}
        for word, word_data in data.items():
            score = np.mean([
                similarity(test_stats["avg_ratio"], test_stats["ratio_std"], s)
                for s in word_data["samples"]
            ])
            scores[word] = score

        total = sum(scores.values())
        if total == 0:
            return None, {}
        # Normalize scores into percentages that sum to 100.
        probabilities = {w: (s / total) * 100 for w, s in scores.items()}
        prediction = max(probabilities, key=probabilities.get)
        return prediction, probabilities
File without changes
@@ -0,0 +1,151 @@
1
+ Metadata-Version: 2.4
2
+ Name: lipreader
3
+ Version: 0.1.0
4
+ Summary: A CPU-only lip reading toolkit for command recognition from video
5
+ Home-page: https://github.com/parhamfakhar1/lipreader
6
+ Author: Parham Fakhari
7
+ Author-email: parhamfakhari.nab2020@gmail.com
8
+ License: MIT
9
+ Project-URL: Bug Tracker, https://github.com/parhamfakhar1/lipreader/issues
10
+ Project-URL: Source Code, https://github.com/parhamfakhar1/lipreader
11
+ Classifier: Development Status :: 4 - Beta
12
+ Classifier: Intended Audience :: Developers
13
+ Classifier: Intended Audience :: Science/Research
14
+ Classifier: License :: OSI Approved :: MIT License
15
+ Classifier: Operating System :: OS Independent
16
+ Classifier: Programming Language :: Python :: 3
17
+ Classifier: Programming Language :: Python :: 3.7
18
+ Classifier: Programming Language :: Python :: 3.8
19
+ Classifier: Programming Language :: Python :: 3.9
20
+ Classifier: Programming Language :: Python :: 3.10
21
+ Classifier: Programming Language :: Python :: 3.11
22
+ Classifier: Topic :: Multimedia :: Video
23
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
24
+ Requires-Python: >=3.7
25
+ Description-Content-Type: text/markdown
26
+ License-File: LICENSE
27
+ Dynamic: author
28
+ Dynamic: author-email
29
+ Dynamic: classifier
30
+ Dynamic: description
31
+ Dynamic: description-content-type
32
+ Dynamic: home-page
33
+ Dynamic: license
34
+ Dynamic: license-file
35
+ Dynamic: project-url
36
+ Dynamic: requires-python
37
+ Dynamic: summary
38
+
39
+
40
+ # LipReader
41
+
42
+ A lightweight, CPU-only lip reading toolkit for command recognition from video.
43
+ No GPU required — runs efficiently on Intel i5 and similar systems.
44
+
45
+ ## ✨ Features
46
+
47
+ - **CPU-only**: No GPU or deep learning dependencies.
48
+ - **CLI & API**: Use via command line or import as a Python library.
49
+ - **Trainable**: Learn custom lip motion patterns from your own videos.
50
+ - **JSON-based**: All data stored in human-readable JSON format.
51
+ - **Real-time ready**: Optimized for low-latency inference.
52
+
53
+ ## 📦 Installation
54
+
55
+ Install in development mode (recommended):
56
+
57
+ ```bash
58
+ git clone https://github.com/Parhamfakhar1/lipreader.git
59
+ cd lipreader
60
+ pip install -e .
61
+ ```
62
+
63
+ > Requires: Python 3.7+, OpenCV, NumPy
64
+
65
+ ## 🚀 Usage
66
+
67
+ ### Train a new command
68
+
69
+ ```bash
70
+ lipreader train --video start.mp4 --word start
71
+ ```
72
+
73
+ You can train the same word multiple times with different videos:
74
+
75
+ ```bash
76
+ lipreader train -v start1.mp4 -w start
77
+ lipreader train -v start2.mp4 -w start
78
+ ```
79
+
80
+ ### Predict from a video
81
+
82
+ ```bash
83
+ lipreader predict --video test.mp4
84
+ ```
85
+
86
+ **Sample output:**
87
+ ```
88
+ 🎯 Prediction: start
89
+
90
+ 📈 Probabilities:
91
+ start: 86.3%
92
+ stop: 13.7%
93
+ ```
94
+
95
+ ### CLI Options
96
+
97
+ | Flag | Description |
98
+ |------|-------------|
99
+ | `-v`, `--video` | Path to input video (MP4, AVI, etc.) |
100
+ | `-w`, `--word` | Label for training (e.g., "start", "stop") |
101
+ | `-d`, `--data` | Path to JSON data file (default: `lip_data.json`) |
102
+
103
+ ## 💻 Python API
104
+
105
+ Use `LipReader` directly in your code:
106
+
107
+ ```python
108
+ from lipreader import LipReader
109
+
110
+ # Initialize
111
+ reader = LipReader("commands.json")
112
+
113
+ # Train
114
+ reader.train("start.mp4", "start")
115
+
116
+ # Predict
117
+ predicted_word, probabilities = reader.predict("unknown.mp4")
118
+ print(f"Detected: {predicted_word}")
119
+ ```
120
+
121
+ ## 🗃️ Data Format
122
+
123
+ All trained patterns are saved in `lip_data.json`:
124
+
125
+ ```json
126
+ {
127
+ "start": {
128
+ "samples": [
129
+ {
130
+ "avg_ratio": 1.28,
131
+ "ratio_std": 0.25,
132
+ "min_ratio": 0.78,
133
+ "max_ratio": 1.88,
134
+ "frame_count": 120,
135
+ "video": "start1.mp4"
136
+ }
137
+ ]
138
+ }
139
+ }
140
+ ```
141
+
142
+ ## ⚠️ Limitations
143
+
144
+ - Works best in **good lighting** with **front-facing video**.
145
+ - Accuracy depends on **clear lip motion** (silent articulation works).
146
+ - Not designed for full-sentence lip reading — optimized for **short commands**.
147
+
148
+ ## 📄 License
149
+
150
+ MIT License
151
+
@@ -0,0 +1,14 @@
1
+ LICENSE
2
+ MANIFEST.in
3
+ README.md
4
+ requirements.txt
5
+ setup.py
6
+ lipreader/__init__.py
7
+ lipreader/cli.py
8
+ lipreader/core.py
9
+ lipreader/utils.py
10
+ lipreader.egg-info/PKG-INFO
11
+ lipreader.egg-info/SOURCES.txt
12
+ lipreader.egg-info/dependency_links.txt
13
+ lipreader.egg-info/entry_points.txt
14
+ lipreader.egg-info/top_level.txt
@@ -0,0 +1,2 @@
1
+ [console_scripts]
2
+ lipreader = lipreader.cli:main
@@ -0,0 +1 @@
1
+ lipreader
File without changes
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+
@@ -0,0 +1,51 @@
1
+ from setuptools import setup, find_packages
2
+ import os
3
+
4
# The PyPI long description is taken verbatim from the README.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()
6
+
7
def read_requirements():
    """Return the install requirements list.

    Reads requirements.txt when present, skipping blank lines and lines
    whose raw text begins with '#'; otherwise falls back to hard-coded
    defaults so a source checkout without the file still installs.
    """
    if not os.path.exists("requirements.txt"):
        return ["opencv-python", "numpy"]
    requirements = []
    with open("requirements.txt", "r", encoding="utf-8") as fh:
        for raw in fh:
            requirement = raw.strip()
            # Comment check deliberately uses the unstripped line, matching
            # the original behavior.
            if requirement and not raw.startswith("#"):
                requirements.append(requirement)
    return requirements
12
+
13
# Package metadata and build configuration (setuptools).
setup(
    name="lipreader",
    version="0.1.0",  # keep in sync with lipreader.__version__
    author="Parham Fakhari",
    author_email="parhamfakhari.nab2020@gmail.com",
    description="A CPU-only lip reading toolkit for command recognition from video",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/parhamfakhar1/lipreader",
    project_urls={
        "Bug Tracker": "https://github.com/parhamfakhar1/lipreader/issues",
        "Source Code": "https://github.com/parhamfakhar1/lipreader",
    },
    license="MIT",
    packages=find_packages(),
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: 3.11",
        "Topic :: Multimedia :: Video",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
    ],
    python_requires=">=3.7",
    install_requires=read_requirements(),
    # Installs the `lipreader` console command dispatching to cli.main().
    entry_points={
        "console_scripts": [
            "lipreader=lipreader.cli:main",
        ],
    },
    # Include non-code files declared via MANIFEST.in in built packages.
    include_package_data=True,
)