pyfaceau 1.3.9__tar.gz → 1.3.10__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/MANIFEST.in +3 -3
- pyfaceau-1.3.10/PKG-INFO +243 -0
- pyfaceau-1.3.10/README.md +193 -0
- pyfaceau-1.3.10/pyfaceau/__init__.py +46 -0
- pyfaceau-1.3.10/pyfaceau/download_weights.py +343 -0
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau/features/pdm.py +6 -1
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau/pipeline.py +6 -7
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau/prediction/batched_au_predictor.py +1 -1
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau/prediction/model_parser.py +1 -1
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau/processor.py +31 -5
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau/refinement/targeted_refiner.py +1 -1
- pyfaceau-1.3.10/pyfaceau.egg-info/PKG-INFO +243 -0
- pyfaceau-1.3.10/pyfaceau.egg-info/SOURCES.txt +179 -0
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau.egg-info/entry_points.txt +1 -0
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau.egg-info/requires.txt +6 -0
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyproject.toml +8 -1
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/requirements.txt +2 -0
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/setup.py +2 -1
- pyfaceau-1.3.10/weights/AU_predictors/svm_combined/AU_10_dynamic.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_combined/AU_10_static.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_combined/AU_12_dynamic.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_combined/AU_12_static.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_combined/AU_14_dynamic.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_combined/AU_14_static.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_combined/AU_15_dynamic.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_combined/AU_15_static.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_combined/AU_17_dynamic.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_combined/AU_17_static.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_combined/AU_1_dynamic.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_combined/AU_1_static.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_combined/AU_20_dynamic.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_combined/AU_20_static.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_combined/AU_23_dynamic.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_combined/AU_23_static.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_combined/AU_25_dynamic.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_combined/AU_25_static.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_combined/AU_26_dynamic.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_combined/AU_26_static.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_combined/AU_28_dynamic.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_combined/AU_28_static.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_combined/AU_2_dynamic.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_combined/AU_2_static.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_combined/AU_45_dynamic.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_combined/AU_45_static.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_combined/AU_4_dynamic.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_combined/AU_4_static.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_combined/AU_5_dynamic.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_combined/AU_5_static.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_combined/AU_6_dynamic.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_combined/AU_6_static.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_combined/AU_7_dynamic.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_combined/AU_7_static.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_combined/AU_9_dynamic.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_combined/AU_9_static.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_disfa/AU_12_dyn.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_disfa/AU_12_stat.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_disfa/AU_15_dyn.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_disfa/AU_15_stat.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_disfa/AU_17_dyn.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_disfa/AU_17_stat.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_disfa/AU_1_dyn.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_disfa/AU_1_stat.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_disfa/AU_20_dyn.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_disfa/AU_20_stat.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_disfa/AU_25_dyn.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_disfa/AU_25_stat.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_disfa/AU_26_dyn.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_disfa/AU_26_stat.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_disfa/AU_2_dyn.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_disfa/AU_2_stat.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_disfa/AU_4_dyn.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_disfa/AU_4_stat.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_disfa/AU_5_dyn.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_disfa/AU_5_stat.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_disfa/AU_6_dyn.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_disfa/AU_6_stat.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_disfa/AU_9_dyn.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svm_disfa/AU_9_stat.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_combined/AU_10_static_intensity_comb.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_combined/AU_12_static_intensity_comb.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_combined/AU_14_static_intensity.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_combined/AU_15_dynamic_intensity_comb.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_combined/AU_15_static_intensity_comb.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_combined/AU_17_dynamic_intensity_comb.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_combined/AU_17_static_intensity_comb.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_combined/AU_1_dynamic_intensity_comb.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_combined/AU_1_static_intensity.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_combined/AU_1_static_intensity_comb.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_combined/AU_20_dynamic_intensity.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_combined/AU_20_static_intensity.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_combined/AU_23_dynamic_intensity_comb.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_combined/AU_23_static_intensity_comb.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_combined/AU_25_dynamic_intensity_comb.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_combined/AU_25_static_intensity.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_combined/AU_26_dynamic_intensity_comb.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_combined/AU_26_static_intensity_comb.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_combined/AU_2_dynamic_intensity_comb.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_combined/AU_2_static_intensity_comb.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_combined/AU_45_dynamic_intensity_comb.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_combined/AU_45_static_intensity_comb.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_combined/AU_4_static_intensity_comb.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_combined/AU_5_dynamic_intensity.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_combined/AU_5_static_intensity.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_combined/AU_6_static_intensity_comb.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_combined/AU_7_static_intensity_comb.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_combined/AU_9_dynamic_intensity.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_combined/AU_9_static_intensity.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_disfa/AU_12_dynamic_intensity.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_disfa/AU_12_static_intensity.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_disfa/AU_15_dynamic_intensity.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_disfa/AU_15_static_intensity.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_disfa/AU_17_dynamic_intensity.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_disfa/AU_17_static_intensity.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_disfa/AU_1_dynamic_intensity.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_disfa/AU_1_static_intensity.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_disfa/AU_20_dynamic_intensity.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_disfa/AU_20_static_intensity.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_disfa/AU_25_dynamic_intensity.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_disfa/AU_25_static_intensity.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_disfa/AU_26_dynamic_intensity.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_disfa/AU_26_static_intensity.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_disfa/AU_2_dynamic_intensity.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_disfa/AU_2_static_intensity.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_disfa/AU_4_dynamic_intensity.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_disfa/AU_4_static_intensity.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_disfa/AU_5_dynamic_intensity.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_disfa/AU_5_static_intensity.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_disfa/AU_6_dynamic_intensity.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_disfa/AU_6_static_intensity.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_disfa/AU_9_dynamic_intensity.dat +3 -0
- pyfaceau-1.3.10/weights/AU_predictors/svr_disfa/AU_9_static_intensity.dat +3 -0
- pyfaceau-1.3.10/weights/patch_experts/cen_patches_0.25_of.dat +3 -0
- pyfaceau-1.3.10/weights/patch_experts/cen_patches_0.35_of.dat +3 -0
- pyfaceau-1.3.10/weights/patch_experts/cen_patches_0.50_of.dat +3 -0
- pyfaceau-1.3.10/weights/patch_experts/cen_patches_1.00_of.dat +3 -0
- pyfaceau-1.3.9/PKG-INFO +0 -84
- pyfaceau-1.3.9/README.md +0 -39
- pyfaceau-1.3.9/pyfaceau/__init__.py +0 -19
- pyfaceau-1.3.9/pyfaceau/download_weights.py +0 -134
- pyfaceau-1.3.9/pyfaceau.egg-info/PKG-INFO +0 -84
- pyfaceau-1.3.9/pyfaceau.egg-info/SOURCES.txt +0 -62
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/LICENSE +0 -0
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau/alignment/__init__.py +0 -0
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau/alignment/calc_params.py +0 -0
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau/alignment/face_aligner.py +0 -0
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau/alignment/numba_calcparams_accelerator.py +0 -0
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau/alignment/paw.py +0 -0
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau/config.py +0 -0
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau/data/__init__.py +0 -0
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau/data/hdf5_dataset.py +0 -0
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau/data/quality_filter.py +0 -0
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau/data/training_data_generator.py +0 -0
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau/detectors/__init__.py +0 -0
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau/detectors/extract_mtcnn_weights.py +0 -0
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau/detectors/openface_mtcnn.py +0 -0
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau/detectors/pfld.py +0 -0
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau/detectors/pymtcnn_detector.py +0 -0
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau/detectors/retinaface.py +0 -0
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau/features/__init__.py +0 -0
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau/features/histogram_median_tracker.py +0 -0
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau/features/triangulation.py +0 -0
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau/nn/__init__.py +0 -0
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau/nn/au_prediction_inference.py +0 -0
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau/nn/au_prediction_net.py +0 -0
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau/nn/fast_pipeline.py +0 -0
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau/nn/landmark_pose_inference.py +0 -0
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau/nn/landmark_pose_net.py +0 -0
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau/nn/train_au_prediction.py +0 -0
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau/nn/train_landmark_pose.py +0 -0
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau/parallel_pipeline.py +0 -0
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau/prediction/__init__.py +0 -0
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau/prediction/au_predictor.py +0 -0
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau/prediction/online_au_correction.py +0 -0
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau/prediction/running_median.py +0 -0
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau/prediction/running_median_fallback.py +0 -0
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau/refinement/__init__.py +0 -0
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau/refinement/pdm.py +0 -0
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau/refinement/svr_patch_expert.py +0 -0
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau/utils/__init__.py +0 -0
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau/utils/cython_extensions/cython_histogram_median.pyx +0 -0
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau/utils/cython_extensions/cython_rotation_update.pyx +0 -0
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau/utils/cython_extensions/setup.py +0 -0
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau.egg-info/dependency_links.txt +0 -0
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau.egg-info/not-zip-safe +0 -0
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau.egg-info/top_level.txt +0 -0
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/pyfaceau_gui.py +0 -0
- {pyfaceau-1.3.9 → pyfaceau-1.3.10}/setup.cfg +0 -0

{pyfaceau-1.3.9 → pyfaceau-1.3.10}/MANIFEST.in
@@ -21,9 +21,9 @@ global-exclude *.pyo
 global-exclude __pycache__
 global-exclude .DS_Store
 
-#
-
-recursive-
+# Include weights for git-based installs (but these are excluded from PyPI sdist)
+# Users installing from git will get weights; PyPI users must download separately
+recursive-include weights *.txt *.dat
 
 # Exclude development files
 exclude .gitignore
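
The new `recursive-include weights *.txt *.dat` line only changes what goes into the source distribution, so the comment's claim (weights ship with git-based installs but are excluded from the PyPI sdist) can be checked by listing a locally built sdist. A minimal sketch, with the `dist/pyfaceau-1.3.10.tar.gz` path assumed (e.g. the output of `python -m build`):

```python
import tarfile

# Hypothetical path to a locally built sdist (e.g. produced by `python -m build`)
sdist_path = "dist/pyfaceau-1.3.10.tar.gz"

with tarfile.open(sdist_path, "r:gz") as tf:
    weight_members = [m.name for m in tf.getmembers() if "/weights/" in m.name]

# Empty list -> weights excluded from the tarball; non-empty -> they ship with it
print(f"{len(weight_members)} weight files found in {sdist_path}")
for name in weight_members[:5]:
    print("  ", name)
```
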
pyfaceau-1.3.10/PKG-INFO
ADDED
@@ -0,0 +1,243 @@
Metadata-Version: 2.4
Name: pyfaceau
Version: 1.3.10
Summary: Pure Python OpenFace 2.2 AU extraction with CLNF landmark refinement
Home-page: https://github.com/johnwilsoniv/face-analysis
Author: John Wilson
Author-email:
License: CC BY-NC 4.0
Project-URL: Homepage, https://github.com/johnwilsoniv/pyfaceau
Project-URL: Documentation, https://github.com/johnwilsoniv/pyfaceau
Project-URL: Repository, https://github.com/johnwilsoniv/pyfaceau
Project-URL: Bug Tracker, https://github.com/johnwilsoniv/pyfaceau/issues
Keywords: facial-action-units,openface,computer-vision,facial-analysis,emotion-recognition
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Science/Research
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
Classifier: Topic :: Scientific/Engineering :: Image Recognition
Classifier: License :: Other/Proprietary License
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Operating System :: OS Independent
Requires-Python: >=3.10
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: numpy>=1.20.0
Requires-Dist: opencv-python>=4.5.0
Requires-Dist: pandas>=1.3.0
Requires-Dist: onnxruntime>=1.10.0
Requires-Dist: scipy>=1.7.0
Requires-Dist: scikit-learn>=1.0.0
Requires-Dist: tqdm>=4.62.0
Requires-Dist: numba>=0.56.0
Requires-Dist: pyfhog>=0.1.0
Requires-Dist: pyclnf>=0.2.0
Requires-Dist: pymtcnn>=0.1.0
Provides-Extra: dev
Requires-Dist: pytest>=7.0.0; extra == "dev"
Requires-Dist: black>=22.0.0; extra == "dev"
Requires-Dist: flake8>=4.0.0; extra == "dev"
Provides-Extra: accel
Requires-Dist: onnxruntime-coreml>=1.10.0; extra == "accel"
Provides-Extra: training
Requires-Dist: torch>=2.0.0; extra == "training"
Requires-Dist: h5py>=3.0.0; extra == "training"
Dynamic: home-page
Dynamic: license-file
Dynamic: requires-python

# pyfaceau

A Python-based implementation of OpenFace 2.2's Facial Action Unit extraction pipeline with an accurate dlib substitute (pymtcnn, pyclnf).

**Accuracy: r = 0.97 correlation with C++ OpenFace 2.2**

## Installation

### Option 1: Install from GitHub (Recommended for Development)

This includes model weights:

```bash
# Clone repository with weights
git clone https://github.com/johnwilsoniv/pyfaceau.git
cd pyfaceau

# Create conda environment (recommended)
conda create -n pyfaceau python=3.11
conda activate pyfaceau

# Install in editable mode
pip install -e .
```

### Option 2: Install from PyPI

```bash
pip install pyfaceau
```

This automatically installs dependencies:
- [pyclnf](https://github.com/johnwilsoniv/pyclnf) - Facial landmark detection (68 points)
- [pymtcnn](https://github.com/johnwilsoniv/pymtcnn) - Face detection
- [pyfhog](https://github.com/johnwilsoniv/pyfhog) - FHOG feature extraction

### Model Weights

Model weights (~50 MB) are downloaded automatically on first use. If the auto-download fails, download them manually:

```bash
# Download weights
python -m pyfaceau.download_weights

# Or after pip install
pyfaceau-download-weights
```

Weights are stored in `~/.pyfaceau/weights/`. You can customize this location:

```bash
# Set custom weights directory
export PYFACEAU_WEIGHTS_DIR=/path/to/your/weights
```

### Troubleshooting: "PDM file not found"

If you see this error:
1. **First try**: Run `python -m pyfaceau.download_weights`
2. **Manual fix**: Copy the `weights/` folder from the GitHub repo to `~/.pyfaceau/weights/`
3. **Alternative**: Set the `PYFACEAU_WEIGHTS_DIR` environment variable to point to your weights
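
The same weight management is also available from Python: `download_weights`, `ensure_weights`, `get_weights_dir`, and `weights_exist` are re-exported at the package level. A minimal sketch, with no-argument call signatures assumed:

```python
import pyfaceau

# Signatures are assumed here; the helpers may accept an optional target directory.
if not pyfaceau.weights_exist():
    pyfaceau.download_weights()      # fetch the ~50 MB of model files

print("Weights directory:", pyfaceau.get_weights_dir())

# ensure_weights() is assumed to bundle the check-and-download steps above,
# which is convenient at the top of a processing script.
pyfaceau.ensure_weights()
```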

## Quick Start

### Video Processing (Recommended)

```python
from pyfaceau import OpenFaceProcessor

# Initialize processor
processor = OpenFaceProcessor(verbose=True)

# Process video to CSV (same format as OpenFace)
processor.process_video("input.mp4", "output.csv")
```

### Batch Processing

```python
from pyfaceau import process_videos

# Process all videos in a directory
process_videos(
    directory_path="/path/to/videos",
    output_dir="/path/to/output"
)
```

### Frame-by-Frame Processing

```python
from pyfaceau import FullPythonAUPipeline
from pathlib import Path
import cv2

# Initialize pipeline with model paths
weights_dir = Path("weights")
pipeline = FullPythonAUPipeline(
    pdm_file=str(weights_dir / "In-the-wild_aligned_PDM_68.txt"),
    au_models_dir=str(weights_dir / "AU_predictors"),
    triangulation_file=str(weights_dir / "tris_68_full.txt"),
    patch_expert_file=str(weights_dir / "svr_patches_0.25_general.txt")
)

# Process single frame
image = cv2.imread("face.jpg")
image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

result = pipeline.process_frame(image_rgb, frame_num=0)

if result['success']:
    print("AU intensities:", result['au_intensities'])
    print("Landmarks shape:", result['landmarks_2d'].shape)  # (68, 2)
    print("Pose (pitch, yaw, roll):", result['pose'])
```
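
For whole videos, `OpenFaceProcessor.process_video` already handles the loop and the CSV writing; the sketch below shows the manual equivalent with OpenCV, assuming the `pipeline` object and the result keys from the snippet above:

```python
import cv2

cap = cv2.VideoCapture("input.mp4")
frame_num = 0
au_rows = []

while True:
    ok, frame_bgr = cap.read()
    if not ok:
        break  # end of stream

    # OpenCV decodes to BGR; the pipeline example above expects RGB
    frame_rgb = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB)
    result = pipeline.process_frame(frame_rgb, frame_num=frame_num)

    if result['success']:
        au_rows.append(result['au_intensities'])
    frame_num += 1

cap.release()
print(f"Processed {frame_num} frames, {len(au_rows)} with a detected face")
```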

## Output Format

### CSV Output Columns

The output CSV matches OpenFace format:
- `frame` - Frame number
- `timestamp` - Time in seconds
- `confidence` - Detection confidence
- `success` - Whether face was detected
- `AU01_r` through `AU45_r` - AU intensities (0.0 - 5.0)
- `pose_Rx`, `pose_Ry`, `pose_Rz` - Head pose in radians
- `x_0` through `x_67`, `y_0` through `y_67` - 68 landmark coordinates
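
Since the columns follow the OpenFace naming scheme, the CSV is easy to summarize with pandas. A minimal sketch, assuming an `output.csv` produced by `process_video` and `success` encoded as 0/1 as in OpenFace output:

```python
import pandas as pd

df = pd.read_csv("output.csv")

# Keep only frames where a face was detected
detected = df[df["success"] == 1]
print(f"Detection rate: {len(detected) / len(df):.1%}")

# Mean intensity per AU column (AU01_r ... AU45_r) across detected frames
au_cols = [c for c in detected.columns if c.startswith("AU") and c.endswith("_r")]
print(detected[au_cols].mean().round(3))
```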

### Action Units

17 facial action units with intensity values (0.0 - 5.0):

| AU | Description |
|----|-------------|
| AU01 | Inner Brow Raiser |
| AU02 | Outer Brow Raiser |
| AU04 | Brow Lowerer |
| AU05 | Upper Lid Raiser |
| AU06 | Cheek Raiser |
| AU07 | Lid Tightener |
| AU09 | Nose Wrinkler |
| AU10 | Upper Lip Raiser |
| AU12 | Lip Corner Puller |
| AU14 | Dimpler |
| AU15 | Lip Corner Depressor |
| AU17 | Chin Raiser |
| AU20 | Lip Stretcher |
| AU23 | Lip Tightener |
| AU25 | Lips Part |
| AU26 | Jaw Drop |
| AU45 | Blink |

## Accuracy

Validated against C++ OpenFace 2.2:

| Metric | Correlation |
|--------|-------------|
| **Overall Mean** | r = 0.97 |
| **Overall Median** | r = 0.996 |
| Static AUs | r = 0.98 |
| Dynamic AUs | r = 0.96 |

Per-AU correlations:
- AU01: 0.997, AU02: 0.999, AU04: 0.989, AU05: 0.999
- AU06: 0.999, AU07: 0.996, AU09: 0.997, AU10: 0.994
- AU12: 0.998, AU14: 0.974, AU15: 0.893, AU17: 0.948
- AU20: 0.817, AU23: 0.996, AU25: 0.984, AU26: 0.902, AU45: 0.998
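
The overall mean and median rows are consistent with the per-AU list; copying the values into a quick check reproduces them:

```python
from statistics import mean, median

# Per-AU correlations copied from the list above (AU01 ... AU45)
r = [0.997, 0.999, 0.989, 0.999, 0.999, 0.996, 0.997, 0.994, 0.998,
     0.974, 0.893, 0.948, 0.817, 0.996, 0.984, 0.902, 0.998]

print(f"mean   = {mean(r):.3f}")    # 0.969, reported as r = 0.97
print(f"median = {median(r):.3f}")  # 0.996
```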

## Requirements

- Python 3.10+
- numpy
- opencv-python
- torch
- scipy

## Acknowledgments

Based on OpenFace 2.2:

> Baltrusaitis, T., Zadeh, A., Lim, Y. C., & Morency, L. P. (2018). OpenFace 2.0: Facial Behavior Analysis Toolkit. IEEE International Conference on Automatic Face and Gesture Recognition.

## Citation

If you use this in research, please cite:

> Wilson IV, J., Rosenberg, J., Gray, M. L., & Razavi, C. R. (2025). A split-face computer vision/machine learning assessment of facial paralysis using facial action units. *Facial Plastic Surgery & Aesthetic Medicine*. https://doi.org/10.1177/26893614251394382

## License

CC BY-NC 4.0 - Free for non-commercial use with attribution.

pyfaceau-1.3.10/README.md
ADDED
@@ -0,0 +1,193 @@
(The new README.md is identical to the long description embedded in PKG-INFO above, from "# pyfaceau" through the License section, 193 lines.)

pyfaceau-1.3.10/pyfaceau/__init__.py
ADDED
@@ -0,0 +1,46 @@
"""
pyfaceau - Pure Python OpenFace 2.2 AU Extraction

A complete Python implementation of OpenFace 2.2's AU extraction pipeline
with high-performance parallel processing support and CLNF landmark refinement.
"""

__version__ = "1.3.10"

# Weight management functions can be imported without heavy dependencies
from .download_weights import (
    download_weights,
    ensure_weights,
    get_weights_dir,
    weights_exist
)


def __getattr__(name):
    """Lazy import of heavy modules to speed up package loading."""
    if name == 'FullPythonAUPipeline':
        from .pipeline import FullPythonAUPipeline
        return FullPythonAUPipeline
    elif name == 'ParallelAUPipeline':
        from .parallel_pipeline import ParallelAUPipeline
        return ParallelAUPipeline
    elif name == 'OpenFaceProcessor':
        from .processor import OpenFaceProcessor
        return OpenFaceProcessor
    elif name == 'process_videos':
        from .processor import process_videos
        return process_videos
    raise AttributeError(f"module 'pyfaceau' has no attribute '{name}'")


__all__ = [
    'FullPythonAUPipeline',
    'ParallelAUPipeline',
    'OpenFaceProcessor',
    'process_videos',
    # Weight management
    'download_weights',
    'ensure_weights',
    'get_weights_dir',
    'weights_exist',
]
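
The module-level `__getattr__` (PEP 562) keeps `import pyfaceau` lightweight: only the weight helpers are imported eagerly, and the pipeline classes load on first attribute access. A small usage sketch, assuming pyfaceau and its dependencies are installed:

```python
import time
import pyfaceau

print(pyfaceau.__version__)   # available immediately, no heavy imports yet

t0 = time.perf_counter()
Processor = pyfaceau.OpenFaceProcessor   # triggers `from .processor import OpenFaceProcessor`
print(f"first access took {time.perf_counter() - t0:.2f}s")

# Later accesses still go through __getattr__, but the submodule import
# itself is cached by Python's import machinery, so they are cheap.
```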