pyfaceau 1.0.3__cp312-cp312-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40) hide show
  1. pyfaceau/__init__.py +19 -0
  2. pyfaceau/alignment/__init__.py +0 -0
  3. pyfaceau/alignment/calc_params.py +671 -0
  4. pyfaceau/alignment/face_aligner.py +352 -0
  5. pyfaceau/alignment/numba_calcparams_accelerator.py +244 -0
  6. pyfaceau/cython_histogram_median.cp312-win_amd64.pyd +0 -0
  7. pyfaceau/cython_rotation_update.cp312-win_amd64.pyd +0 -0
  8. pyfaceau/detectors/__init__.py +0 -0
  9. pyfaceau/detectors/pfld.py +128 -0
  10. pyfaceau/detectors/retinaface.py +352 -0
  11. pyfaceau/download_weights.py +134 -0
  12. pyfaceau/features/__init__.py +0 -0
  13. pyfaceau/features/histogram_median_tracker.py +335 -0
  14. pyfaceau/features/pdm.py +269 -0
  15. pyfaceau/features/triangulation.py +64 -0
  16. pyfaceau/parallel_pipeline.py +462 -0
  17. pyfaceau/pipeline.py +1083 -0
  18. pyfaceau/prediction/__init__.py +0 -0
  19. pyfaceau/prediction/au_predictor.py +434 -0
  20. pyfaceau/prediction/batched_au_predictor.py +269 -0
  21. pyfaceau/prediction/model_parser.py +337 -0
  22. pyfaceau/prediction/running_median.py +318 -0
  23. pyfaceau/prediction/running_median_fallback.py +200 -0
  24. pyfaceau/processor.py +270 -0
  25. pyfaceau/refinement/__init__.py +12 -0
  26. pyfaceau/refinement/svr_patch_expert.py +361 -0
  27. pyfaceau/refinement/targeted_refiner.py +362 -0
  28. pyfaceau/utils/__init__.py +0 -0
  29. pyfaceau/utils/cython_extensions/cython_histogram_median.c +35391 -0
  30. pyfaceau/utils/cython_extensions/cython_histogram_median.pyx +316 -0
  31. pyfaceau/utils/cython_extensions/cython_rotation_update.c +32262 -0
  32. pyfaceau/utils/cython_extensions/cython_rotation_update.pyx +211 -0
  33. pyfaceau/utils/cython_extensions/setup.py +47 -0
  34. pyfaceau-1.0.3.data/scripts/pyfaceau_gui.py +302 -0
  35. pyfaceau-1.0.3.dist-info/METADATA +466 -0
  36. pyfaceau-1.0.3.dist-info/RECORD +40 -0
  37. pyfaceau-1.0.3.dist-info/WHEEL +5 -0
  38. pyfaceau-1.0.3.dist-info/entry_points.txt +3 -0
  39. pyfaceau-1.0.3.dist-info/licenses/LICENSE +40 -0
  40. pyfaceau-1.0.3.dist-info/top_level.txt +1 -0
@@ -0,0 +1,200 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Running Median Tracker for Dynamic AU Models
4
+
5
+ Implements person-specific normalization by tracking the running median
6
+ of feature vectors across video frames. This removes person-specific biases
7
+ in neutral expressions.
8
+
9
+ Reference: OpenFace 2.2 FaceAnalyser::UpdateRunningMedian()
10
+ """
11
+
12
+ import numpy as np
13
+ from collections import deque
14
+
15
+
16
class RunningMedianTracker:
    """
    Tracks running median of feature vectors for person-specific normalization.

    Used by dynamic AU models to normalize for individual neutral expressions.
    """

    def __init__(self, feature_dim: int, window_size: int = 200):
        """
        Initialize running median tracker.

        Args:
            feature_dim: Dimensionality of feature vectors
            window_size: Number of frames to use for median calculation
                (OF2.2 uses histogram with bins, we use rolling window)
        """
        self.feature_dim = feature_dim
        self.window_size = window_size

        # Rolling window of feature vectors (stores last N frames);
        # deque(maxlen=...) drops the oldest frame automatically.
        self.history = deque(maxlen=window_size)

        # Current median estimate (stays all-zero until enough samples arrive)
        self.current_median = np.zeros(feature_dim, dtype=np.float64)

        # Total frames seen (not capped by the window)
        self.frame_count = 0

    def update(self, features: np.ndarray, update_median: bool = True) -> None:
        """
        Update tracker with new feature vector.

        Args:
            features: Feature vector. 1D arrays are used as-is; arrays of any
                other rank (e.g. (1, N) row vectors) are flattened first.
            update_median: Whether to recompute median (OF2.2 does this every
                2nd frame)

        Raises:
            ValueError: If the flattened vector length does not equal
                ``feature_dim``.
        """
        # Generalized from the original 2D-only handling: flatten any
        # non-1D input (covers (1, N), (N, 1), and higher-rank arrays).
        if features.ndim != 1:
            features = features.ravel()

        # Explicit validation instead of `assert`, which is stripped when
        # Python runs with -O and would silently accept bad input.
        if features.shape[0] != self.feature_dim:
            raise ValueError(
                f"Expected {self.feature_dim} features, got {features.shape[0]}"
            )

        # Copy so later caller-side mutation cannot corrupt the history.
        self.history.append(features.copy())
        self.frame_count += 1

        # Update median only if requested and we have enough samples
        # (a median over fewer than 10 frames is too noisy to be useful).
        if update_median and len(self.history) >= 10:
            # Convert history to array of shape (N_frames, N_features)
            history_array = np.array(self.history)

            # Compute median along the frame axis
            self.current_median = np.median(history_array, axis=0)

    def get_median(self) -> np.ndarray:
        """
        Get current running median.

        Returns:
            Median feature vector (1D array); a copy, safe for the caller
            to mutate.
        """
        return self.current_median.copy()

    def reset(self) -> None:
        """Reset tracker (e.g., for new video)."""
        self.history.clear()
        self.current_median = np.zeros(self.feature_dim, dtype=np.float64)
        self.frame_count = 0
85
+
86
+
87
class DualMedianTracker:
    """
    Maintains two independent running medians: one for HOG features and one
    for geometric features.

    OF2.2 keeps these two feature streams separate and only concatenates
    their medians when computing dynamic model predictions.
    """

    def __init__(self, hog_dim: int = 4464, geom_dim: int = 238, window_size: int = 200):
        """
        Create the per-stream trackers.

        Args:
            hog_dim: HOG feature dimensionality
            geom_dim: Geometric feature dimensionality
            window_size: Rolling window size shared by both trackers
        """
        self.hog_tracker = RunningMedianTracker(hog_dim, window_size)
        self.geom_tracker = RunningMedianTracker(geom_dim, window_size)

    def update(self, hog_features: np.ndarray, geom_features: np.ndarray,
               update_median: bool = True) -> None:
        """
        Push one frame of features into both trackers.

        Args:
            hog_features: HOG feature vector
            geom_features: Geometric feature vector
            update_median: Recompute the medians after appending
        """
        pairs = (
            (self.hog_tracker, hog_features),
            (self.geom_tracker, geom_features),
        )
        for tracker, vector in pairs:
            tracker.update(vector, update_median)

    def get_combined_median(self) -> np.ndarray:
        """
        Concatenate the two medians as [HOG_median, geom_median].

        Returns:
            Combined median vector (hog_dim + geom_dim entries; 4702 with
            the default dimensions)
        """
        return np.concatenate(
            (self.hog_tracker.get_median(), self.geom_tracker.get_median())
        )

    def get_hog_median(self) -> np.ndarray:
        """Return the HOG median on its own."""
        return self.hog_tracker.get_median()

    def get_geom_median(self) -> np.ndarray:
        """Return the geometric median on its own."""
        return self.geom_tracker.get_median()

    def reset(self) -> None:
        """Clear both trackers (e.g., when a new video starts)."""
        for tracker in (self.hog_tracker, self.geom_tracker):
            tracker.reset()
143
+
144
+
145
def test_running_median():
    """Smoke-test both tracker classes on synthetic data."""
    banner = "=" * 80
    print(banner)
    print("Running Median Tracker - Test")
    print(banner)

    tracker = RunningMedianTracker(feature_dim=10, window_size=100)

    # Deterministic noise (fixed seed) around a known mean vector
    np.random.seed(42)
    mean_features = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0])

    print("\nSimulating 200 frames...")
    for i in range(200):
        # Noisy observation of the fixed mean
        sample = mean_features + np.random.randn(10) * 0.5

        # Recompute the median on every frame for this test
        tracker.update(sample, update_median=True)

        # Report every 50th frame
        if (i + 1) % 50 == 0:
            med = tracker.get_median()
            print(f"\nFrame {i+1}:")
            print(f" True mean: {mean_features[:3]}...")
            print(f" Running median: {med[:3]}...")
            print(f" Error: {np.abs(med - mean_features).mean():.6f}")

    print("\nTracker converges to true mean!")

    # Exercise the dual (HOG + geometric) tracker
    print("\n" + banner)
    print("Dual Median Tracker - Test")
    print(banner)

    dual_tracker = DualMedianTracker(hog_dim=4464, geom_dim=238)

    print("\nUpdating with synthetic HOG and geometric features...")
    for i in range(100):
        dual_tracker.update(
            np.random.randn(4464),
            np.random.randn(238),
            update_median=(i % 2 == 0),
        )

    combined = dual_tracker.get_combined_median()
    print(f"Combined median shape: {combined.shape}")
    print(f"Expected: (4702,)")
    assert combined.shape == (4702,)

    print("\n" + banner)
    print("All tests passed!")
    print(banner)


if __name__ == "__main__":
    test_running_median()
pyfaceau/processor.py ADDED
@@ -0,0 +1,270 @@
1
+ """
2
+ OpenFace-compatible AU extraction processor.
3
+
4
+ This module provides a drop-in replacement for OpenFace 3.0
5
+ with the same API for easy integration into existing workflows
6
+ like S1 Face Mirror.
7
+ """
8
+
9
+ import cv2
10
+ import csv
11
+ import numpy as np
12
+ from pathlib import Path
13
+ from typing import Optional, Callable
14
+ from .pipeline import FullPythonAUPipeline
15
+
16
+
17
class OpenFaceProcessor:
    """
    OpenFace 2.2-compatible AU extraction processor.

    Drop-in replacement for OpenFace 3.0 built on the pyfaceau pipeline,
    intended for seamless use with S1 Face Mirror and other OpenFace-based
    applications.

    Features:
    - 17 Action Units (AU01-AU45)
    - r > 0.92 correlation with OpenFace 2.2 C++
    - CLNF landmark refinement
    - Real-time capable (72 fps)
    - 100% Python (no compilation)

    Example:
        ```python
        processor = OpenFaceProcessor(
            weights_dir='weights/',
            use_clnf_refinement=True
        )

        processor.process_video(
            'input.mp4',
            'output.csv',
            progress_callback=my_callback
        )
        ```
    """

    def __init__(
        self,
        device: Optional[str] = None,
        weights_dir: Optional[str] = None,
        use_clnf_refinement: bool = True,
        num_threads: int = 6,
        verbose: bool = False,
        **kwargs
    ):
        """
        Initialize OpenFace AU extraction processor.

        Args:
            device: Unused (kept for API compatibility). PyFaceAU auto-detects.
            weights_dir: Path to weights directory (default: ./weights)
            use_clnf_refinement: Enable CLNF landmark refinement (default: True)
            num_threads: Unused (kept for API compatibility)
            verbose: Enable verbose logging (default: False)
            **kwargs: Additional arguments (ignored for compatibility)
        """
        self.verbose = verbose

        # Resolve the weights directory: default sits next to the package root.
        if weights_dir is None:
            weights_dir = Path(__file__).parent.parent / 'weights'
        else:
            weights_dir = Path(weights_dir)

        if self.verbose:
            print("Initializing PyFaceAU (OpenFace 2.2 Python replacement)...")

        # Patch experts are only needed when CLNF refinement is on.
        patch_expert = (
            str(weights_dir / 'svr_patches_0.25_general.txt')
            if use_clnf_refinement else None
        )

        # Assemble all model paths and hand them to the pipeline.
        pipeline_kwargs = dict(
            retinaface_model=str(weights_dir / 'retinaface_mobilenet025_coreml.onnx'),
            pfld_model=str(weights_dir / 'pfld_cunjian.onnx'),
            pdm_file=str(weights_dir / 'In-the-wild_aligned_PDM_68.txt'),
            au_models_dir=str(weights_dir / 'AU_predictors'),
            triangulation_file=str(weights_dir / 'tris_68_full.txt'),
            patch_expert_file=patch_expert,
            use_clnf_refinement=use_clnf_refinement,
            use_batched_predictor=True,
            verbose=verbose,
        )
        self.pipeline = FullPythonAUPipeline(**pipeline_kwargs)

        if self.verbose:
            print(f" PyFaceAU initialized")
            print(f" CLNF refinement: {'Enabled' if use_clnf_refinement else 'Disabled'}")
            print(f" Expected accuracy: r > 0.92 (OpenFace 2.2 correlation)")
            print()

    def process_video(
        self,
        video_path: str,
        output_csv_path: str,
        progress_callback: Optional[Callable[[int, int, float], None]] = None
    ) -> int:
        """
        Process video and extract AUs.

        Compatible with S1 Face Mirror integration and other OpenFace-based
        applications.

        Args:
            video_path: Path to input video file
            output_csv_path: Path to output CSV file
            progress_callback: Optional callback function(current, total, fps)
                for progress updates

        Returns:
            Number of frames successfully processed
        """
        video_path = Path(video_path)
        output_csv_path = Path(output_csv_path)

        if self.verbose:
            print(f"Processing: {video_path.name}")

        # Make sure the destination directory exists before the pipeline writes
        output_csv_path.parent.mkdir(parents=True, exist_ok=True)

        try:
            # Run the full pipeline; it returns a per-frame results table.
            results = self.pipeline.process_video(
                video_path=str(video_path),
                output_csv=str(output_csv_path),
                max_frames=None,
                progress_callback=progress_callback
            )

            success_count = results['success'].sum()

            if self.verbose:
                total_frames = len(results)
                print(f" Processed {success_count}/{total_frames} frames successfully")
                if success_count < total_frames:
                    failed = total_frames - success_count
                    print(f" {failed} frames failed (no face detected)")
                print(f" Output: {output_csv_path}")

            return int(success_count)

        except Exception as e:
            # Surface the failure to the caller after optional logging.
            if self.verbose:
                print(f" Error processing video: {e}")
            raise

    def clear_cache(self):
        """
        Clear any internal caches to free memory.

        Call this if processing many videos with different resolutions
        and want to free up cache memory between batches.
        """
        # The PyFaceAU pipeline holds no caches today; this is a no-op kept
        # purely for API compatibility.
        pass
164
+
165
+
166
def process_videos(
    directory_path: str,
    specific_files: Optional[list] = None,
    output_dir: Optional[str] = None,
    **processor_kwargs
) -> int:
    """
    Process multiple video files using OpenFaceProcessor.

    This function provides batch processing capability compatible with
    S1 Face Mirror workflows.

    Args:
        directory_path: Path to directory containing video files
        specific_files: List of specific files to process (optional)
        output_dir: Output directory for CSV files (optional)
        **processor_kwargs: Additional arguments passed to OpenFaceProcessor

    Returns:
        Number of files successfully processed

    Example:
        ```python
        # Process all mirrored videos in a directory
        count = process_videos(
            directory_path='/path/to/mirrored/videos',
            output_dir='/path/to/output',
            use_clnf_refinement=True
        )
        print(f"Processed {count} videos")
        ```
    """
    directory_path = Path(directory_path)

    # Bail out early if the input directory is missing.
    if not directory_path.is_dir():
        print(f"Error: Directory '{directory_path}' does not exist.")
        return 0

    # Determine output directory
    if output_dir is None:
        # Default: S1O Processed Files/Combined Data/
        s1o_base = directory_path.parent.parent / 'S1O Processed Files'
        output_dir = s1o_base / 'Combined Data'
    else:
        output_dir = Path(output_dir)

    output_dir.mkdir(parents=True, exist_ok=True)
    print(f"Output directory: {output_dir}")

    # Initialize processor (after the cheap validation above)
    processor = OpenFaceProcessor(**processor_kwargs)

    # Counter for processed files
    processed_count = 0

    # Define which files to process
    if specific_files:
        # Process only the specific files
        files_to_process = [Path(f) for f in specific_files]
        print(f"Processing {len(files_to_process)} specific files from current session.")
    else:
        # Process all eligible files in the directory
        files_to_process = list(directory_path.iterdir())
        print(f"Processing all eligible files in {directory_path}")

    # Process each file
    for file_path in files_to_process:
        # Skip if not a file or doesn't exist
        if not file_path.is_file():
            print(f"Warning: {file_path} does not exist or is not a file. Skipping.")
            continue

        filename = file_path.name

        # Skip files with 'debug' in the filename.
        # BUG FIX: these log messages previously printed the literal text
        # "(unknown)" instead of interpolating the actual filename.
        if 'debug' in filename:
            print(f"Skipping debug file: {filename}")
            continue

        # Process file with 'mirrored' in the filename
        if 'mirrored' in filename:
            # Generate output CSV filename
            # Example: "video_left_mirrored.mp4" -> "video_left_mirrored.csv"
            csv_filename = file_path.stem + '.csv'
            output_csv_path = output_dir / csv_filename

            try:
                # Process video and extract AUs
                frame_count = processor.process_video(file_path, output_csv_path)

                if frame_count > 0:
                    processed_count += 1
                    print(f"Successfully processed: {filename}\n")
                else:
                    print(f"Failed to process: {filename}\n")

            except Exception as e:
                # Best-effort batch processing: report and move to the next file.
                print(f"Error processing {filename}: {e}\n")

    print(f"\nProcessing complete. {processed_count} files were processed.")

    return processed_count
@@ -0,0 +1,12 @@
1
+ """
2
+ CLNF landmark refinement module for PyFaceAU
3
+
4
+ This module implements targeted CLNF refinement to improve PFLD landmark accuracy,
5
+ specifically for brow landmarks (17-26) and lip corners (48, 54) which are critical
6
+ for AU01, AU02, and AU23 detection.
7
+ """
8
+
9
+ from .svr_patch_expert import SVRPatchExpert, SVRPatchExpertLoader
10
+ from .targeted_refiner import TargetedCLNFRefiner
11
+
12
+ __all__ = ['SVRPatchExpert', 'SVRPatchExpertLoader', 'TargetedCLNFRefiner']