bithuman-1.0.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. bithuman/__init__.py +13 -0
  2. bithuman/_version.py +1 -0
  3. bithuman/api.py +164 -0
  4. bithuman/audio/__init__.py +19 -0
  5. bithuman/audio/audio.py +396 -0
  6. bithuman/audio/hparams.py +108 -0
  7. bithuman/audio/utils.py +255 -0
  8. bithuman/config.py +88 -0
  9. bithuman/engine/__init__.py +15 -0
  10. bithuman/engine/auth.py +335 -0
  11. bithuman/engine/compression.py +257 -0
  12. bithuman/engine/enums.py +16 -0
  13. bithuman/engine/image_ops.py +192 -0
  14. bithuman/engine/inference.py +108 -0
  15. bithuman/engine/knn.py +58 -0
  16. bithuman/engine/video_data.py +391 -0
  17. bithuman/engine/video_reader.py +168 -0
  18. bithuman/lib/__init__.py +1 -0
  19. bithuman/lib/audio_encoder.onnx +45631 -28
  20. bithuman/lib/generator.py +763 -0
  21. bithuman/lib/pth2h5.py +106 -0
  22. bithuman/plugins/__init__.py +0 -0
  23. bithuman/plugins/stt.py +185 -0
  24. bithuman/runtime.py +1004 -0
  25. bithuman/runtime_async.py +469 -0
  26. bithuman/service/__init__.py +9 -0
  27. bithuman/service/client.py +788 -0
  28. bithuman/service/messages.py +210 -0
  29. bithuman/service/server.py +759 -0
  30. bithuman/utils/__init__.py +43 -0
  31. bithuman/utils/agent.py +359 -0
  32. bithuman/utils/fps_controller.py +90 -0
  33. bithuman/utils/image.py +41 -0
  34. bithuman/utils/unzip.py +38 -0
  35. bithuman/video_graph/__init__.py +16 -0
  36. bithuman/video_graph/action_trigger.py +83 -0
  37. bithuman/video_graph/driver_video.py +482 -0
  38. bithuman/video_graph/navigator.py +736 -0
  39. bithuman/video_graph/trigger.py +90 -0
  40. bithuman/video_graph/video_script.py +344 -0
  41. bithuman-1.0.2.dist-info/METADATA +37 -0
  42. bithuman-1.0.2.dist-info/RECORD +44 -0
  43. bithuman-1.0.2.dist-info/WHEEL +5 -0
  44. bithuman-1.0.2.dist-info/top_level.txt +1 -0
bithuman/video_graph/trigger.py
@@ -0,0 +1,90 @@
+ from __future__ import annotations
+
+ import json
+ from typing import Any, List, Literal, Optional
+
+ from loguru import logger
+ from pydantic import BaseModel, Field
+
+
+ class TriggerData(BaseModel):
+     """Data to be sent when a trigger is activated."""
+
+     target_video: Optional[str] = None
+     actions: List[str] | str = Field(default_factory=list)
+     description: str = ""
+
+
+ class VideoActionTrigger(BaseModel):
+     """Base class for video action triggers."""
+
+     trigger_data: TriggerData = Field(
+         description="Data to be sent when trigger conditions are met"
+     )
+
+     def check_trigger(self, condition: Any) -> Optional[TriggerData]:
+         """
+         Base method to check if trigger conditions are met.
+         Args:
+             condition: The condition to check against (type varies by trigger type)
+         Returns:
+             TriggerData if triggered, None otherwise
+         """
+         return None
+
+     @classmethod
+     def from_json(cls, json_str: str) -> List["VideoActionTrigger"]:
+         """
+         Create trigger instances from a JSON string using Pydantic validation.
+         Args:
+             json_str: JSON string containing trigger configurations
+         Returns:
+             List of validated trigger instances
+         """
+         if not json_str:
+             return []
+         try:
+             triggers_data = json.loads(json_str)
+             return [
+                 cls.model_validate_json(json.dumps(trigger))
+                 for trigger in triggers_data
+             ]
+         except Exception as e:
+             logger.exception(f"Error parsing triggers: {e}")
+             return []
+
+
+ class KeywordTrigger(VideoActionTrigger):
+     """Trigger that activates when specific keywords are detected."""
+
+     keywords: List[str] = Field(
+         description="List of keywords that can trigger this action"
+     )
+     trigger_source: Literal["user", "agent", "both"] = Field(
+         default="both", description="Who can trigger this action - user, agent, or both"
+     )
+
+     def check_trigger(
+         self, text: str, source: Literal["user", "agent"]
+     ) -> Optional[TriggerData]:
+         """
+         Check if the given text and source trigger this keyword.
+         Args:
+             text: The text to check
+             source: The source of the text - either "user" or "agent"
+         Returns:
+             TriggerData if triggered, None otherwise
+         """
+         if self.trigger_source != "both" and source != self.trigger_source:
+             return None
+
+         if any(keyword.lower() in text.lower() for keyword in self.keywords):
+             return self.trigger_data
+         return None
+
+
+ if __name__ == "__main__":
+     triggers = KeywordTrigger.from_json(
+         '[{"keywords": ["goodbye", "bye"], "trigger_source": "user", "trigger_data": {"target_video": null, "actions": "wave", "description": "Wave animation"}}]',  # noqa: E501
+     )
+     print(triggers)
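
For orientation, a minimal sketch of how these triggers might be driven from application code. The import path follows the file list above; the JSON payload and the input text are illustrative, not part of the package:

    from bithuman.video_graph.trigger import KeywordTrigger

    config = (
        '[{"keywords": ["goodbye", "bye"], "trigger_source": "user",'
        ' "trigger_data": {"actions": ["wave"], "description": "Wave animation"}}]'
    )
    triggers = KeywordTrigger.from_json(config)
    for trigger in triggers:
        # Matching is a case-insensitive substring check over the keyword
        # list, restricted here to user-sourced text by trigger_source="user".
        data = trigger.check_trigger("Bye for now!", source="user")
        if data is not None:
            print(data.actions)  # -> ['wave']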
bithuman/video_graph/video_script.py
@@ -0,0 +1,344 @@
+ """Definition of the script for the video graph."""
+
+ from __future__ import annotations
+
+ import copy
+ import json
+ from collections import OrderedDict, defaultdict
+ from dataclasses import dataclass, field
+ from pathlib import Path
+ from typing import Any, Dict, List, Optional, Tuple
+
+ import numpy as np
+ import yaml
+ from dataclasses_json import dataclass_json
+ from loguru import logger
+
+ from ..api import Emotion, EmotionPrediction
+ from ..config import Settings
+ from .driver_video import DriverVideo, LoopingVideo, SingleActionVideo
+
+ VIDEO_EXTS = [".mp4", ".mov", ".avi"]
+
+
+ @dataclass_json
+ @dataclass
+ class EmotionToVideo:
+     """Dataclass for emotion to video mapping.
+
+     Attributes:
+         emotion: The emotion.
+         video_name: The video name.
+         threshold: The threshold for the emotion.
+         actions: The actions to play before the video.
+     """
+
+     emotion: Emotion
+     video_name: str
+     threshold: float = 0.1
+     actions: List[str] = field(default_factory=list)
+
+
+ @dataclass_json
+ @dataclass
+ class IdleAction:
+     """Dataclass for idle action.
+
+     Attributes:
+         actions: The actions for the idle video.
+         interval: Play the action every interval seconds,
+             or at a random time within the (min, max) range.
+     """
+
+     actions: List[str] = field(default_factory=list)
+     interval: float | Tuple[float, float] = 60
+
+     @property
+     def min_interval(self) -> float:
+         return self.interval if isinstance(self.interval, (int, float)) else self.interval[0]
+
+
+ @dataclass_json
+ @dataclass
+ class VideoScript:
+     default_video: Optional[str] = None
+     action_hi_video: Optional[str] = None
+     emotions_map: List[EmotionToVideo] = field(default_factory=list)
+     idle_video: Optional[str] = None
+     idle_actions: List[IdleAction] = field(default_factory=list)
+     FPS: float = 25
+     BACK_TO_IDLE: float = 10  # back to idle after 10 seconds
+
+     def __post_init__(self):
+         self.last_nonidle_frame = 0
+         self.last_video_name = None
+
+         self.update_index()
+
+     def update_index(self):
+         # index the videos by emotion and group by threshold
+         emotions_map_index: Dict[Emotion, Dict[float, List[EmotionToVideo]]] = {}
+         for e2v in self.emotions_map:
+             emotions_map_index.setdefault(e2v.emotion, defaultdict(list))[
+                 e2v.threshold
+             ].append(e2v)
+
+         # sort the videos by threshold in descending order
+         for emotion in emotions_map_index:
+             emotions_map_index[emotion] = OrderedDict(
+                 sorted(
+                     emotions_map_index[emotion].items(),
+                     key=lambda x: x[0],
+                     reverse=True,
+                 )
+             )
+         self.emotions_map_index = emotions_map_index
+
+         # Next idle action, randomly selected from the idle actions
+         self.set_next_idle_action()
+
+     def set_next_idle_action(self, next_idle_action: IdleAction | None = None):
+         """Set the next idle action.
+
+         Args:
+             next_idle_action: The next idle action. If None, randomly select one.
+         """
+         if not next_idle_action:
+             next_idle_action = (
+                 np.random.choice(self.idle_actions) if self.idle_actions else None
+             )
+
+         self.next_idle_action = next_idle_action
+         if not self.next_idle_action:
+             self.next_idle_action_interval = None
+             return
+
+         if isinstance(self.next_idle_action.interval, (tuple, list)):
+             interval = np.random.uniform(*self.next_idle_action.interval)
+         else:
+             interval = self.next_idle_action.interval
+         self.next_idle_action_interval = interval
+
+     def get_video_and_actions(
+         self,
+         curr_frame_index: int,
+         emotions: Optional[List[EmotionPrediction]] = None,
+         text: Optional[str] = None,
+         is_idle: bool = False,
+         settings: Optional[Settings] = None,
+     ) -> Tuple[Optional[str], List[str], bool]:
+         """Get the next video and the actions to play for the given state."""
+         is_idle = is_idle and not emotions  # idle only when no emotion
+
+         reset_action = False
+         video_name, actions = None, []
+         if is_idle:
+             idle_time = (curr_frame_index - self.last_nonidle_frame) / self.FPS
+
+             # Back to the default video if idle for a long time
+             if idle_time >= self.BACK_TO_IDLE:
+                 next_idle_video = (
+                     self.idle_video
+                     if settings and settings.LIVA_IDEL_VIDEO_ENABLED
+                     else None
+                 )
+                 # TODO: randomly select one idle video from a list
+                 video_name = next_idle_video or self.default_video
+
+             # Play a random idle action if the idle time is long enough
+             if self.next_idle_action and idle_time >= self.next_idle_action_interval:
+                 # Play the idle action, then reset the next idle action
+                 actions = self.next_idle_action.actions
+                 reset_action = True
+                 self.set_next_idle_action()
+                 self.last_nonidle_frame = curr_frame_index
+         else:
+             self.last_nonidle_frame = curr_frame_index
+
+             # Get the video for the emotion
+             if emotions:
+                 # Ignore neutral if there are other emotions > 0.2
+                 if len(emotions) > 1:
+                     if (
+                         emotions[0].emotion == Emotion.NEUTRAL
+                         and emotions[1].score > 0.2
+                     ):
+                         emotions = emotions[1:]
+
+                 top_emotion = emotions[0]
+                 for threshold, e2v_list in self.emotions_map_index.get(
+                     top_emotion.emotion, {}
+                 ).items():
+                     if top_emotion.score >= threshold and e2v_list:
+                         # Randomly select one video from the list
+                         e2v: EmotionToVideo = np.random.choice(e2v_list)
+                         video_name, actions = e2v.video_name, e2v.actions
+                         break
+             if (
+                 text
+                 and self.action_hi_video
+                 and settings
+                 and settings.LIVA_AUTO_SAY_HI
+                 and self.action_hi_video not in actions
+             ):
+                 # Detect hello, hi, bye, goodbye
+                 say_hi = False
+                 text = text.lower()
+                 for word in ["hello", "hi ", "bye", "goodbye"]:
+                     if text.startswith(word):
+                         say_hi = True
+                         break
+                 if say_hi:
+                     actions = [self.action_hi_video] + actions
+
+             if (
+                 not video_name
+                 and self.idle_video
+                 and self.last_video_name == self.idle_video
+             ):
+                 # Back to the default video if non-idle and no video is selected
+                 video_name = self.default_video
+
+         self.last_video_name = video_name
+         return video_name, actions, reset_action
+
+
+ @dataclass_json
+ @dataclass
+ class VideoConfig:
+     name: str
+     video_file: str
+     video_type: str = "LoopingVideo"
+     stride: int = 10
+     loop_between: Tuple[int, int] = (0, -1)
+     remove_nodes: Optional[List[int]] = None
+     transition_frames: Optional[List[int]] = None
+     action_frame: int = -1
+     single_direction: bool = False
+     adding_kwargs: Dict = field(default_factory=dict)
+     lip_sync_required: bool = True
+     stop_on_user_speech: bool = False
+     stop_on_agent_speech: bool = False
+
+     def load_video(self) -> DriverVideo:
+         if self.video_type == "LoopingVideo":
+             video = LoopingVideo(
+                 name=self.name,
+                 video_path=self.video_file,
+                 stride=self.stride,
+                 single_direction=self.single_direction,
+                 stop_on_user_speech=self.stop_on_user_speech,
+                 stop_on_agent_speech=self.stop_on_agent_speech,
+                 loop_between=self.loop_between,
+                 lip_sync_required=self.lip_sync_required,
+             )
+         elif self.video_type == "SingleActionVideo":
+             video = SingleActionVideo(
+                 name=self.name,
+                 video_path=self.video_file,
+                 single_direction=self.single_direction,
+                 stop_on_user_speech=self.stop_on_user_speech,
+                 stop_on_agent_speech=self.stop_on_agent_speech,
+                 transition_frames=self.transition_frames,
+                 action_frame=self.action_frame,
+                 lip_sync_required=self.lip_sync_required,
+             )
+         else:
+             raise ValueError(f"Unknown video type: {self.video_type}")
+
+         if self.lip_sync_required and not video.video_data_path:
+             raise ValueError(
+                 f"Lip sync is required for video {self.name}, but no video data path is provided"
+             )
+
+         if self.remove_nodes:
+             video.remove_nodes(frame_indices=self.remove_nodes)
+         return video
+
+
+ @dataclass_json
+ @dataclass
+ class VideoConfigs:
+     videos: List[VideoConfig]
+     videos_script: Optional[VideoScript] = field(default_factory=VideoScript)
+     talking_face_configs: Optional[Dict[str, Any]] = None
+
+     def load_videos(
+         self, video_root: Optional[str] = None, verbose: bool = True
+     ) -> List["DriverVideo"]:
+         if video_root:
+             video_configs = copy.deepcopy(self.videos)
+             video_root = Path(video_root)
+             for video in video_configs:
+                 video.video_file = str(video_root / video.video_file)
+         else:
+             video_configs = self.videos
+
+         return [video_config.load_video() for video_config in video_configs]
+
+     @classmethod
+     def from_videofolder(cls, video_folder: str) -> "VideoConfigs":
+         """Create a VideoConfigs object from a video folder."""
+         video_files = [
+             p
+             for p in Path(video_folder).iterdir()
+             if p.suffix.lower() in VIDEO_EXTS and not p.name.startswith(".")
+         ]
+         if len(video_files) == 0:
+             raise ValueError(f"No video files found in {video_folder}")
+         if len(video_files) > 1:
+             raise ValueError(
+                 f"Multiple video files found in {video_folder}: {video_files}"
+             )
+         video_file = video_files[0]
+         return cls(
+             videos=[VideoConfig(name=video_file.stem, video_file=str(video_file))]
+         )
+
+     @classmethod
+     def from_videofile(
+         cls, video_file: str, inference_data_file: Optional[str] = None
+     ) -> "VideoConfigs":
+         """Create a VideoConfigs object from a video file."""
+         return cls(
+             videos=[
+                 VideoConfig(
+                     name=Path(video_file).stem,
+                     video_file=str(video_file),
+                     inference_data_file=inference_data_file,
+                 )
+             ]
+         )
+
+     @classmethod
+     def from_yaml(cls, file_path: str) -> "VideoConfigs":
+         """Load the video configs from a YAML file."""
+         with open(file_path) as f:
+             data = yaml.safe_load(f)
+         return cls.from_dict(data)
+
+     def to_yaml(self, file_path: str) -> None:
+         """Save the video configs to a YAML file."""
+         with open(file_path, "w") as f:
+             data = json.loads(self.to_json())
+             yaml.dump(data, f, sort_keys=False)
+
+     def update_runtime_configs(self, settings: Settings):
+         """Update the runtime configs."""
+         if not settings.ALLOW_VIDEO_SCRIPT_UPDATE:
+             logger.info(
+                 "Video script update is disabled; skipping runtime config update."
+             )
+             return
+
+         configs = self.talking_face_configs or {}
+         fails, success = {}, {}
+         for k, v in configs.items():
+             if hasattr(settings, k):
+                 setattr(settings, k, v)
+                 success[k] = v
+             else:
+                 fails[k] = v
+         logger.info(f"Updated runtime configs from model: {success}")
+         if fails:
+             logger.warning(f"Runtime configs not found in settings: {fails}")
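
A sketch of how these configs might be assembled and loaded. The paths are illustrative; from_yaml and to_yaml rely on the from_dict/to_json methods that dataclasses_json generates, and load_videos assumes the referenced video files (and their lip-sync data, since lip_sync_required defaults to True) actually exist:

    from bithuman.video_graph.video_script import VideoConfig, VideoConfigs

    # Build a config programmatically and persist it as YAML...
    configs = VideoConfigs(videos=[VideoConfig(name="idle", video_file="idle.mp4")])
    configs.to_yaml("videos.yaml")

    # ...then round-trip it and resolve video paths against a root directory
    # before instantiating the DriverVideo objects.
    configs = VideoConfigs.from_yaml("videos.yaml")
    videos = configs.load_videos(video_root="/path/to/videos")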
bithuman-1.0.2.dist-info/METADATA
@@ -0,0 +1,37 @@
+ Metadata-Version: 2.4
+ Name: bithuman
+ Version: 1.0.2
+ Summary: Bithuman avatar runtime — pure Python engine
+ Platform: Linux
+ Platform: Mac OS X
+ Platform: Windows
+ Requires-Python: >=3.9
+ Requires-Dist: numpy>=1.26.0
+ Requires-Dist: h5py~=3.13
+ Requires-Dist: loguru~=0.7
+ Requires-Dist: numba<1.0,>=0.60
+ Requires-Dist: soxr~=0.5
+ Requires-Dist: scipy<2.0,>=1.13
+ Requires-Dist: moviepy~=1.0.3
+ Requires-Dist: soundfile~=0.13
+ Requires-Dist: pydantic~=2.10
+ Requires-Dist: pydantic-settings~=2.8
+ Requires-Dist: networkx<4.0,>=3.1
+ Requires-Dist: tqdm~=4.67
+ Requires-Dist: dataclasses-json~=0.6
+ Requires-Dist: librosa~=0.10
+ Requires-Dist: pyzmq~=26.2; python_version < "3.14"
+ Requires-Dist: msgpack~=1.1
+ Requires-Dist: PyYAML~=6.0
+ Requires-Dist: aiohttp~=3.11
+ Requires-Dist: onnxruntime>=1.18
+ Requires-Dist: av>=12.0
+ Requires-Dist: PyJWT>=2.8
+ Requires-Dist: requests>=2.31
+ Requires-Dist: lz4>=4.3
+ Requires-Dist: PyTurboJPEG>=1.7
+ Requires-Dist: opencv-python-headless>=4.8
+ Provides-Extra: agent
+ Requires-Dist: livekit-agents~=1.1; extra == "agent"
+ Dynamic: platform
+ Dynamic: requires-python
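
Per the Provides-Extra field above, the base wheel installs without LiveKit; the optional agent integration is pulled in through the extra, e.g. pip install 'bithuman[agent]'.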
bithuman-1.0.2.dist-info/RECORD
@@ -0,0 +1,44 @@
+ bithuman/__init__.py,sha256=MI3NNap09CT5durEAjHBRW-gv3bu-yHfHQQNeP63ZoQ,286
+ bithuman/_version.py,sha256=Y3LSfRioSl2xch70pq_ULlvyECXyEtN3krVaWeGyaxk,22
+ bithuman/api.py,sha256=yJZIkMODYXcvdRx7t9nYuNktr1gfCfqfcx8t-xjJbO4,4704
+ bithuman/config.py,sha256=_x3O_HEpw05obPWB0bjzmRrh-IPZQ2-wBFq75p6-280,2646
+ bithuman/runtime.py,sha256=h9AzmHJBejJ5kULZrh9efveL4oaF7qHX0DVVECc26RA,40122
+ bithuman/runtime_async.py,sha256=2iQ-zKjAqmJj0ezJSw32hTJhGJBUeXpZ-pEHUHEbJmw,18953
+ bithuman/audio/__init__.py,sha256=tSUqsLLmxLEeBZ-O0XRSGQIB2igsA9eWr7vzgxMtugg,358
+ bithuman/audio/audio.py,sha256=Wfrm7UGTBAMgkzwwoicVf9SvvCF8KzW-mqcG-R5K-wI,10807
+ bithuman/audio/hparams.py,sha256=ytgTtxvNgJvrm5rAmhq8jdF_rgt1IMmSxFYnjD-hpmk,4078
+ bithuman/audio/utils.py,sha256=16B5klsdIbhknoNrbHz1BsvWaoVec1JQ4YswIWzwelc,8464
+ bithuman/engine/__init__.py,sha256=fZXWUyaYxokaiUp62VtbXwlJD3t-1FAtUH0CQ1E2sLc,518
+ bithuman/engine/auth.py,sha256=BcIQJvibohgJYlWFuKhFbkcIYWBMKSZyHkG_94jHLDU,11540
+ bithuman/engine/compression.py,sha256=JPnVTL1oeGXWFjR2HdkDLdXiTkbJXX-D0KK82yE0jLM,8659
+ bithuman/engine/enums.py,sha256=m4yAP4j4UD2E2qSOiY7fMP5y45WaQ7PDPhzKzTy9sRA,254
+ bithuman/engine/image_ops.py,sha256=gUl8LQdgVj2NSAH9peLdiv2KN49f6Y-RHmkuk3fVhTQ,6672
+ bithuman/engine/inference.py,sha256=eYGASWlTM3URbG-tUGs25MVCm75KTag2NqaRwDE69BA,4058
+ bithuman/engine/knn.py,sha256=lWvGkgudYhp75la1FFZDoeW8PHmMQ_g_BSFbEYtv9Uk,1832
+ bithuman/engine/video_data.py,sha256=BEWPdWu-qakM_bMr9pEdK_HDga37_f6Iq2yQudIxv90,14052
+ bithuman/engine/video_reader.py,sha256=l40YUpzbotbq73XjypEru1KMqDASXd81vYgbuS5gcqA,5132
+ bithuman/lib/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
+ bithuman/lib/audio_encoder.onnx,sha256=6r2eD03YdD_ONH_H8rAlA7ue0KQSv3T0JC__mhNRiM0,2840632
+ bithuman/lib/generator.py,sha256=JJJ6G42qhnfJDVYgDPMRaYbEstQEb7PsCAQWCi-kjwk,28954
+ bithuman/lib/pth2h5.py,sha256=mOdFbuZtQfydWbsJ-9U3IqRPH10Jtt5Z3bfwu2S0b3g,3004
+ bithuman/plugins/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ bithuman/plugins/stt.py,sha256=_i3ChycPjg7Woeq-EXQGNJq7bDowuiWmkaU8X3zSJSM,5802
+ bithuman/service/__init__.py,sha256=YfXb20u8xkceD1LmqWbqrODa1NAeYluPY_WA-mF9MeM,208
+ bithuman/service/client.py,sha256=66CQhPvk-zV331IcCWS7igFAvEPL5alFnfOjUB-FF60,28851
+ bithuman/service/messages.py,sha256=iZ7bjwAV9M06xxE5z87FLzvRL2KsGUmtTK9HQlZPWbk,6075
+ bithuman/service/server.py,sha256=cfbD2WfPUjEPgiGPtgJz1Tc6ACZ8tZnveJ1YAzO7OFI,28026
+ bithuman/utils/__init__.py,sha256=HRszl5Vv_qjq0_NDlP6uFPc-dl0R-IrrcUeObAVHOnc,1245
+ bithuman/utils/agent.py,sha256=2LvzXcaE5xuViTnPMntsnX1Mmj73pTfYE_2z1eQpF7Y,11955
+ bithuman/utils/fps_controller.py,sha256=NvesSQeGG75aVBOfofujH-orCpbIutrh4c8Dt4fFoGg,3005
+ bithuman/utils/image.py,sha256=Cu51bKvcfYdG4cvuLSRI0fl6cfN6eQdigMQR-szEaog,1173
+ bithuman/utils/unzip.py,sha256=S5dKjEcQAGuDl9RM3o0SYBP4vOVoMcu7cViT96U9Kwo,1298
+ bithuman/video_graph/__init__.py,sha256=qKqr5wjUr15YRuCB_ivEjnS34vkwZS5W3O3eFxlkvlk,400
+ bithuman/video_graph/action_trigger.py,sha256=22it95VOHSL6zR1HdSGqtQgysQN5t9awGt1NgRGfioM,2636
+ bithuman/video_graph/driver_video.py,sha256=TVBANOo2R25Q_sTf03pxUVosVHrU4MlYP_EkFrwMhv4,17076
+ bithuman/video_graph/navigator.py,sha256=4UJpqBQHc4_aBbZcyyNuFOSuQZLqnzikKrZ57jaPWes,27475
+ bithuman/video_graph/trigger.py,sha256=0I8p4JGzB2LVtB8y_OIz4IIX3qYc6YuHMU-00KDb4IQ,2910
+ bithuman/video_graph/video_script.py,sha256=_I0aG7hAcnIU8GKghUNc4ZqyvkB_-4MaEbAdlnefJcY,12056
+ bithuman-1.0.2.dist-info/METADATA,sha256=eplXy89F0kbptdedwg6paE5Zdf6QLSk5C27_Zp5BM24,1076
+ bithuman-1.0.2.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+ bithuman-1.0.2.dist-info/top_level.txt,sha256=NhjtjyoEtM5UOK1XHTJ8xzw72SanqkCwpeTKMMTykG8,9
+ bithuman-1.0.2.dist-info/RECORD,,
bithuman-1.0.2.dist-info/WHEEL
@@ -0,0 +1,5 @@
+ Wheel-Version: 1.0
+ Generator: setuptools (80.10.2)
+ Root-Is-Purelib: true
+ Tag: py3-none-any
+
bithuman-1.0.2.dist-info/top_level.txt
@@ -0,0 +1 @@
+ bithuman