pyfaceau-1.0.6-cp312-cp312-macosx_11_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. pyfaceau/__init__.py +19 -0
  2. pyfaceau/alignment/__init__.py +0 -0
  3. pyfaceau/alignment/calc_params.py +671 -0
  4. pyfaceau/alignment/face_aligner.py +352 -0
  5. pyfaceau/alignment/numba_calcparams_accelerator.py +244 -0
  6. pyfaceau/cython_histogram_median.cpython-312-darwin.so +0 -0
  7. pyfaceau/cython_rotation_update.cpython-312-darwin.so +0 -0
  8. pyfaceau/detectors/__init__.py +0 -0
  9. pyfaceau/detectors/pfld.py +128 -0
  10. pyfaceau/detectors/retinaface.py +352 -0
  11. pyfaceau/download_weights.py +134 -0
  12. pyfaceau/features/__init__.py +0 -0
  13. pyfaceau/features/histogram_median_tracker.py +335 -0
  14. pyfaceau/features/pdm.py +269 -0
  15. pyfaceau/features/triangulation.py +64 -0
  16. pyfaceau/parallel_pipeline.py +462 -0
  17. pyfaceau/pipeline.py +1083 -0
  18. pyfaceau/prediction/__init__.py +0 -0
  19. pyfaceau/prediction/au_predictor.py +434 -0
  20. pyfaceau/prediction/batched_au_predictor.py +269 -0
  21. pyfaceau/prediction/model_parser.py +337 -0
  22. pyfaceau/prediction/running_median.py +318 -0
  23. pyfaceau/prediction/running_median_fallback.py +200 -0
  24. pyfaceau/processor.py +270 -0
  25. pyfaceau/refinement/__init__.py +12 -0
  26. pyfaceau/refinement/svr_patch_expert.py +361 -0
  27. pyfaceau/refinement/targeted_refiner.py +362 -0
  28. pyfaceau/utils/__init__.py +0 -0
  29. pyfaceau/utils/cython_extensions/cython_histogram_median.c +35391 -0
  30. pyfaceau/utils/cython_extensions/cython_histogram_median.pyx +316 -0
  31. pyfaceau/utils/cython_extensions/cython_rotation_update.c +32262 -0
  32. pyfaceau/utils/cython_extensions/cython_rotation_update.pyx +211 -0
  33. pyfaceau/utils/cython_extensions/setup.py +47 -0
  34. pyfaceau-1.0.6.data/scripts/pyfaceau_gui.py +302 -0
  35. pyfaceau-1.0.6.dist-info/METADATA +466 -0
  36. pyfaceau-1.0.6.dist-info/RECORD +40 -0
  37. pyfaceau-1.0.6.dist-info/WHEEL +5 -0
  38. pyfaceau-1.0.6.dist-info/entry_points.txt +3 -0
  39. pyfaceau-1.0.6.dist-info/licenses/LICENSE +40 -0
  40. pyfaceau-1.0.6.dist-info/top_level.txt +1 -0
@@ -0,0 +1,128 @@
+ #!/usr/bin/env python3
+ """Cunjian PFLD landmark detector wrapper for AU extraction pipeline"""
+
+ import numpy as np
+ import cv2
+ import onnxruntime as ort
+
+
+ class CunjianPFLDDetector:
+     """
+     Wrapper for cunjian's PFLD 68-point landmark detector.
+
+     Model: PFLD_ExternalData (112×112)
+     Published accuracy: 3.97% NME on 300W Full Set
+     Measured accuracy: 4.37% NME on our validation
+     Size: 2.9MB
+     Speed: 0.01s per face
+     """
+
+     def __init__(self, model_path, use_coreml=True):
+         """Initialize the PFLD detector.
+
+         Args:
+             model_path: Path to the ONNX model file
+             use_coreml: Whether to attempt CoreML acceleration on Apple Silicon (default: True)
+         """
+         # Configure execution providers for Apple Silicon Neural Engine acceleration
+         if use_coreml:
+             providers = [
+                 ('CoreMLExecutionProvider', {
+                     'MLComputeUnits': 'ALL',     # Use Neural Engine + GPU + CPU
+                     'ModelFormat': 'MLProgram',  # Use latest CoreML format
+                 }),
+                 'CPUExecutionProvider'  # Fallback
+             ]
+         else:
+             providers = ['CPUExecutionProvider']
+
+         # Suppress CoreML compilation warnings
+         import warnings
+         with warnings.catch_warnings():
+             warnings.filterwarnings('ignore')
+             self.session = ort.InferenceSession(model_path, providers=providers)
+
+         # Check which provider is active
+         active_providers = self.session.get_providers()
+         if 'CoreMLExecutionProvider' in active_providers:
+             print("PFLD using CoreML Neural Engine acceleration (2-3x speedup)")
+         else:
+             print("Warning: PFLD using CPU execution (CoreML unavailable)")
+
+         self.input_name = self.session.get_inputs()[0].name
+
+         # Model expects 112x112 RGB input normalized to [0, 1]
+         self.input_size = 112
+
+     def detect_landmarks(self, frame, bbox):
+         """Detect 68 facial landmarks.
+
+         Args:
+             frame: BGR image (HxWx3)
+             bbox: Face bounding box [x_min, y_min, x_max, y_max]
+
+         Returns:
+             landmarks: (68, 2) array of (x, y) coordinates in original image space
+             confidence: Dummy confidence (always 1.0 for this model)
+         """
+         x_min, y_min, x_max, y_max = bbox
+
+         # Calculate square bbox with 10% padding (cunjian approach)
+         w = x_max - x_min
+         h = y_max - y_min
+         size = int(max([w, h]) * 1.1)
+         cx = int(x_min + w / 2)
+         cy = int(y_min + h / 2)
+         x1 = cx - size // 2
+         x2 = x1 + size
+         y1 = cy - size // 2
+         y2 = y1 + size
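+         # Worked example (illustrative numbers only): for bbox = [10, 20, 110, 100],
+         # w = 100, h = 80, size = int(100 * 1.1) = 110, cx = cy = 60, so the
+         # square crop becomes x1 = 5, x2 = 115, y1 = 5, y2 = 115.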
+
+         # Clip to image bounds and add padding if needed
+         height, width = frame.shape[:2]
+         dx = max(0, -x1)
+         dy = max(0, -y1)
+         x1 = max(0, x1)
+         y1 = max(0, y1)
+         edx = max(0, x2 - width)
+         edy = max(0, y2 - height)
+         x2 = min(width, x2)
+         y2 = min(height, y2)
+
+         # Crop face
+         cropped = frame[y1:y2, x1:x2]
+
+         # Add border padding if face was at edge
+         if dx > 0 or dy > 0 or edx > 0 or edy > 0:
+             cropped = cv2.copyMakeBorder(cropped, int(dy), int(edy), int(dx), int(edx),
+                                          cv2.BORDER_CONSTANT, 0)
+
+         # Preprocess: BGR -> RGB, resize to 112x112, normalize to [0, 1]
+         cropped_rgb = cv2.cvtColor(cropped, cv2.COLOR_BGR2RGB)
+         face_resized = cv2.resize(cropped_rgb, (self.input_size, self.input_size))
+         face_normalized = face_resized.astype(np.float32) / 255.0
+         face_input = np.transpose(face_normalized, (2, 0, 1))  # HWC -> CHW
+         face_input = np.expand_dims(face_input, axis=0)  # Add batch dimension
+
+         # Run inference
+         output = self.session.run(None, {self.input_name: face_input})[0]
+
+         # Output is (1, 136) -> reshape to (68, 2) normalized coordinates
+         landmarks = output.reshape(-1, 2)
+
+         # Reproject from normalized [0, 1] space to original image coordinates
+         bbox_w = x2 - x1
+         bbox_h = y2 - y1
+         landmarks_reprojected = np.zeros_like(landmarks)
+         for i, point in enumerate(landmarks):
+             landmarks_reprojected[i, 0] = point[0] * bbox_w + x1
+             landmarks_reprojected[i, 1] = point[1] * bbox_h + y1
+
+         # Return dummy confidence (PFLD doesn't output per-landmark confidence)
+         confidence = 1.0
+
+         return landmarks_reprojected, confidence
+
+     def __repr__(self):
+         return (f"CunjianPFLDDetector(input_size={self.input_size}, "
+                 f"landmarks=68, accuracy=4.37% NME)")
@@ -0,0 +1,352 @@
+ #!/usr/bin/env python3
+ """
+ Optimized RetinaFace detector using ONNX Runtime with CoreML acceleration.
+
+ This module provides a drop-in replacement for the PyTorch-based RetinaFace detector,
+ optimized for Apple Silicon using the Neural Engine via the CoreML execution provider.
+
+ Expected performance: 5-10x speedup (from ~191ms to ~20-40ms per detection)
+ """
+
+ import numpy as np
+ import torch
+ import onnxruntime as ort
+ from typing import Tuple
+
+ # Import RetinaFace post-processing utilities
+ from openface.Pytorch_Retinaface.layers.functions.prior_box import PriorBox
+ from openface.Pytorch_Retinaface.utils.box_utils import decode, decode_landm
+ from openface.Pytorch_Retinaface.utils.nms.py_cpu_nms import py_cpu_nms
+ from openface.Pytorch_Retinaface.data import cfg_mnet
+
+ # Import performance profiler
+ from performance_profiler import get_profiler
+
+
+ class ONNXRetinaFaceDetector:
+     """
+     ONNX-accelerated RetinaFace detector for Apple Silicon.
+
+     This class provides the same interface as OpenFace 3.0's FaceDetector,
+     but uses ONNX Runtime with the CoreML execution provider for a large speedup.
+     """
+
+     def __init__(self, onnx_model_path: str, use_coreml: bool = True,
+                  confidence_threshold: float = 0.02,
+                  nms_threshold: float = 0.4,
+                  vis_threshold: float = 0.5):
+         """
+         Initialize the ONNX RetinaFace detector.
+
+         Args:
+             onnx_model_path: Path to converted ONNX model
+             use_coreml: Whether to attempt CoreML execution provider (default: True)
+             confidence_threshold: Minimum confidence for face detection (default: 0.02)
+             nms_threshold: NMS threshold for duplicate suppression (default: 0.4)
+             vis_threshold: Visibility threshold for filtering weak detections (default: 0.5)
+         """
+         self.confidence_threshold = confidence_threshold
+         self.nms_threshold = nms_threshold
+         self.vis_threshold = vis_threshold
+         self.cfg = cfg_mnet
+
+         # Configure execution providers.
+         # NOTE: CoreML may not fully support all RetinaFace operations.
+         # We try CoreML first, but gracefully fall back to optimized CPU execution.
+         if use_coreml:
+             providers = [
+                 ('CoreMLExecutionProvider', {
+                     'MLComputeUnits': 'ALL',     # Use Neural Engine + GPU + CPU
+                     'ModelFormat': 'MLProgram',  # Use latest CoreML format
+                 }),
+                 'CPUExecutionProvider'  # Fallback
+             ]
+         else:
+             providers = ['CPUExecutionProvider']
+
+         # Load ONNX model
+         print(f"Loading ONNX RetinaFace model from: {onnx_model_path}")
+
+         if use_coreml:
+             print("")
+             print("=" * 70)
+             print("⏰ CoreML First-Time Compilation Notice:")
+             print("   If this is the first time loading this model with CoreML,")
+             print("   compilation may take 30-60 seconds (one-time only).")
+             print("   Subsequent loads will be instant (model is cached).")
+             print("   Please wait...")
+             print("=" * 70)
+             print("")
+
+         # Configure session options to prevent thread conflicts
+         sess_options = ort.SessionOptions()
+         sess_options.intra_op_num_threads = 1  # Single thread per operator
+         sess_options.inter_op_num_threads = 1  # Sequential operator execution
+         sess_options.execution_mode = ort.ExecutionMode.ORT_SEQUENTIAL
+
+         # Suppress CoreML compilation warnings (they're expected for complex models)
+         import warnings
+         with warnings.catch_warnings():
+             warnings.filterwarnings('ignore')
+             self.session = ort.InferenceSession(onnx_model_path, sess_options=sess_options, providers=providers)
+
+         # Check which providers are actually active
+         active_providers = self.session.get_providers()
+
+         if 'CoreMLExecutionProvider' in active_providers:
+             print("Using CoreML Neural Engine acceleration for face detection")
+             print("   Expected: 5-10x speedup")
+             self.backend = 'coreml'
+         else:
+             print("Using ONNX Runtime with optimized CPU execution for face detection")
+             print("   Expected: 2-4x speedup over PyTorch")
+             print("   (CoreML not available for this model - some operations unsupported)")
+             self.backend = 'onnx_cpu'
+
+     def preprocess_image(self, img_array: np.ndarray, resize: float = 1.0) -> Tuple[np.ndarray, np.ndarray]:
+         """
+         Preprocess image for RetinaFace inference.
+
+         Args:
+             img_array: BGR image array (H, W, 3)
+             resize: Resize factor (default: 1.0 for no resize)
+
+         Returns:
+             Tuple of (preprocessed_tensor, original_image)
+         """
+         img_raw = img_array.copy()
+         img = np.float32(img_raw)
+
+         # Resize if needed
+         if resize != 1.0:
+             import cv2
+             img = cv2.resize(img, None, fx=resize, fy=resize, interpolation=cv2.INTER_LINEAR)
+
+         # RetinaFace preprocessing: subtract ImageNet mean (BGR channel order)
+         img -= np.array([104.0, 117.0, 123.0], dtype=np.float32)
+
+         # Convert to NCHW format (batch, channels, height, width)
+         img = img.transpose(2, 0, 1)
+         img = np.expand_dims(img, axis=0)
+
+         return img, img_raw
+
+     def detect_faces(self, img_array: np.ndarray, resize: float = 1.0):
+         """
+         Detect faces in image array.
+
+         Args:
+             img_array: BGR image array (H, W, 3)
+             resize: Resize factor for detection (default: 1.0)
+
+         Returns:
+             Tuple of (detections, original_image)
+             detections format: [x1, y1, x2, y2, confidence, landmark_x1, landmark_y1, ...]
+         """
+         profiler = get_profiler()
+
+         # Preprocess
+         with profiler.time_block("preprocessing", "RetinaFace_preprocess"):
+             img, img_raw = self.preprocess_image(img_array, resize)
+
+         # Run ONNX inference on Neural Engine (or optimized CPU)
+         with profiler.time_block("model_inference", f"RetinaFace_{self.backend}"):
+             outputs = self.session.run(None, {'input': img})
+
+         # Unpack outputs: loc, conf, landms
+         loc = outputs[0]     # Bounding box predictions
+         conf = outputs[1]    # Confidence scores
+         landms = outputs[2]  # 5-point landmarks
+
+         # Post-processing (same as the PyTorch version).
+         # This part stays in Python as it's fast and complex to export.
+         with profiler.time_block("postprocessing", "RetinaFace_postprocess"):
+             im_height, im_width, _ = img_raw.shape
+
+             # Convert outputs to torch tensors for compatibility with existing utilities
+             loc = torch.from_numpy(loc)
+             conf = torch.from_numpy(conf)
+             landms = torch.from_numpy(landms)
+
+             # Create scale tensor
+             scale = torch.Tensor([img.shape[3], img.shape[2], img.shape[3], img.shape[2]])
+
+             # Generate prior boxes for decoding
+             priorbox = PriorBox(self.cfg, image_size=(im_height, im_width))
+             priors = priorbox.forward()
+             prior_data = priors.data
+
+             # Decode boxes
+             boxes = decode(loc.data.squeeze(0), prior_data, self.cfg['variance'])
+             boxes = boxes * scale / resize
+             boxes = boxes.cpu().numpy()
+
+             # Extract scores
+             scores = conf.squeeze(0).data.cpu().numpy()[:, 1]
+
+             # Decode landmarks
+             landms_decoded = decode_landm(landms.data.squeeze(0), prior_data, self.cfg['variance'])
+             scale1 = torch.Tensor([img.shape[3], img.shape[2]] * 5)
+             landms_decoded = landms_decoded * scale1 / resize
+             landms_decoded = landms_decoded.cpu().numpy()
+
+             # Filter by confidence threshold
+             inds = np.where(scores > self.confidence_threshold)[0]
+             boxes, landms_decoded, scores = boxes[inds], landms_decoded[inds], scores[inds]
+
+             # Apply NMS
+             dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
+             keep = py_cpu_nms(dets, self.nms_threshold)
+             dets = dets[keep]
+             landms_decoded = landms_decoded[keep]
+
+             # Concatenate boxes and landmarks
+             dets = np.concatenate((dets, landms_decoded), axis=1)
+
+         return dets, img_raw
+
+     def get_face(self, img_array: np.ndarray, resize: float = 1.0):
+         """
+         Get the primary face from image.
+
+         Args:
+             img_array: BGR image array (H, W, 3)
+             resize: Resize factor (default: 1.0)
+
+         Returns:
+             Tuple of (face_crop, detections) or (None, None) if no face found
+         """
+         dets, img_raw = self.detect_faces(img_array, resize)
+
+         if dets is None or len(dets) == 0:
+             return None, None
+
+         det = dets[0]
+         confidence = det[4]
+
+         if confidence < self.vis_threshold:
+             return None, None
+
+         bbox = det[:4].astype(int)
+         face = img_raw[bbox[1]:bbox[3], bbox[0]:bbox[2]]
+
+         return face, dets
+
+
+ class OptimizedFaceDetector:
+     """
+     Wrapper class that automatically selects the ONNX or PyTorch implementation.
+
+     This class provides seamless fallback from ONNX (fast) to PyTorch (slow)
+     based on model availability.
+     """
+
+     def __init__(self, model_path: str, onnx_model_path: str = None,
+                  device: str = "cpu",
+                  confidence_threshold: float = 0.02,
+                  nms_threshold: float = 0.4,
+                  vis_threshold: float = 0.5):
+         """
+         Initialize face detector with intelligent backend selection.
+
+         Selection logic:
+         - CUDA device: use PyTorch (optimized for NVIDIA GPUs)
+         - CPU device: use ONNX (CoreML on Apple Silicon, optimized CPU on Intel)
+
+         Args:
+             model_path: Path to PyTorch model (.pth)
+             onnx_model_path: Path to ONNX model (.onnx), defaults to same directory
+             device: Device ('cpu' or 'cuda')
+             confidence_threshold: Minimum confidence for detection
+             nms_threshold: NMS threshold
+             vis_threshold: Visibility threshold
+         """
+         from pathlib import Path
+
+         # Store configuration
+         self.confidence_threshold = confidence_threshold
+         self.nms_threshold = nms_threshold
+         self.vis_threshold = vis_threshold
+         self.cfg = cfg_mnet
+
+         # Determine ONNX model path
+         if onnx_model_path is None:
+             model_dir = Path(model_path).parent
+             onnx_model_path = model_dir / 'retinaface_mobilenet025_coreml.onnx'
+
+         # CUDA: use PyTorch directly (best for NVIDIA GPUs)
+         if device == 'cuda':
+             print("Using PyTorch RetinaFace detector (CUDA-accelerated)")
+             from openface.face_detection import FaceDetector
+             self.detector = FaceDetector(
+                 model_path=model_path,
+                 device=device,
+                 confidence_threshold=confidence_threshold,
+                 nms_threshold=nms_threshold,
+                 vis_threshold=vis_threshold
+             )
+             self.backend = 'pytorch_cuda'
+             self.model = self.detector.model
+             return
+
+         # CPU: try ONNX first (CoreML on Apple Silicon, optimized CPU on Intel)
+         if Path(onnx_model_path).exists():
+             try:
+                 print("Using ONNX-accelerated RetinaFace detector")
+                 self.detector = ONNXRetinaFaceDetector(
+                     str(onnx_model_path),
+                     use_coreml=True,
+                     confidence_threshold=confidence_threshold,
+                     nms_threshold=nms_threshold,
+                     vis_threshold=vis_threshold
+                 )
+                 self.backend = 'onnx'
+
+                 # Expose model wrapper for compatibility
+                 class ONNXModelWrapper:
+                     def __init__(self, detector):
+                         self.detector = detector
+
+                     def __call__(self, img):
+                         img_np = img.cpu().numpy()
+                         outputs = self.detector.session.run(None, {'input': img_np})
+                         return tuple(torch.from_numpy(o) for o in outputs)
+
+                 self.model = ONNXModelWrapper(self.detector)
+                 return
+             except Exception as e:
+                 print(f"Failed to load ONNX model: {e}")
+                 print("Falling back to PyTorch CPU")
+
+         # Fallback: PyTorch CPU
+         print("Using PyTorch RetinaFace detector (CPU)")
+         from openface.face_detection import FaceDetector
+         self.detector = FaceDetector(
+             model_path=model_path,
+             device=device,
+             confidence_threshold=confidence_threshold,
+             nms_threshold=nms_threshold,
+             vis_threshold=vis_threshold
+         )
+         self.backend = 'pytorch_cpu'
+         self.model = self.detector.model
+
+     def detect_faces(self, img_array: np.ndarray, resize: float = 1.0):
+         """Detect faces using the selected backend"""
+         return self.detector.detect_faces(img_array, resize)
+
+     def get_face(self, img_array: np.ndarray, resize: float = 1.0):
+         """Get primary face using the selected backend"""
+         return self.detector.get_face(img_array, resize)
+
+
+ if __name__ == '__main__':
+     print("ONNX RetinaFace Detector Module")
+     print("=" * 60)
+     print("This module provides CoreML-accelerated RetinaFace face detection.")
+     print("")
+     print("Usage:")
+     print("  from onnx_retinaface_detector import OptimizedFaceDetector")
+     print("  detector = OptimizedFaceDetector('weights/Alignment_RetinaFace.pth')")
+     print("  dets, img = detector.detect_faces(image_array)")
+     print("=" * 60)
@@ -0,0 +1,134 @@
+ #!/usr/bin/env python3
+ """
+ Download model weights for PyFaceAU
+
+ Usage:
+     python -m pyfaceau.download_weights
+ """
+
+ import os
+ import sys
+ import urllib.request
+ from pathlib import Path
+ from tqdm import tqdm
+
+
+ WEIGHTS_BASE_URL = "https://github.com/johnwilsoniv/face-analysis/raw/main/S0%20PyfaceAU/weights/"
+
+ REQUIRED_WEIGHTS = {
+     "retinaface_mobilenet025_coreml.onnx": "1.7MB - Face detection model",
+     "pfld_cunjian.onnx": "2.8MB - Landmark detection model",
+     "In-the-wild_aligned_PDM_68.txt": "67KB - PDM parameters",
+     "svr_patches_0.25_general.txt": "1.1MB - CLNF patch experts",
+     "tris_68_full.txt": "1KB - Triangulation data",
+ }
+
+ AU_PREDICTOR_FILES = [
+     "AU_1_dynamic_intensity_comb.dat",
+     "AU_2_dynamic_intensity_comb.dat",
+     "AU_4_static_intensity_comb.dat",
+     "AU_5_dynamic_intensity_comb.dat",
+     "AU_6_static_intensity_comb.dat",
+     "AU_7_static_intensity_comb.dat",
+     "AU_9_dynamic_intensity_comb.dat",
+     "AU_10_static_intensity_comb.dat",
+     "AU_12_static_intensity_comb.dat",
+     "AU_14_static_intensity_comb.dat",
+     "AU_15_dynamic_intensity_comb.dat",
+     "AU_17_dynamic_intensity_comb.dat",
+     "AU_20_dynamic_intensity_comb.dat",
+     "AU_23_dynamic_intensity_comb.dat",
+     "AU_25_dynamic_intensity_comb.dat",
+     "AU_26_dynamic_intensity_comb.dat",
+     "AU_45_dynamic_intensity_comb.dat",
+ ]
+
+
+ class DownloadProgressBar(tqdm):
+     """Progress bar for downloads"""
+     def update_to(self, b=1, bsize=1, tsize=None):
+         if tsize is not None:
+             self.total = tsize
+         self.update(b * bsize - self.n)
+
+
+ def download_file(url, output_path, desc=None):
+     """Download a file with a progress bar"""
+     os.makedirs(os.path.dirname(output_path), exist_ok=True)
+
+     with DownloadProgressBar(unit='B', unit_scale=True, miniters=1, desc=desc) as t:
+         urllib.request.urlretrieve(url, filename=output_path, reporthook=t.update_to)
+
+
+ def get_weights_dir():
+     """Get or create the weights directory"""
+     # First try the package installation location
+     try:
+         import pyfaceau
+         pkg_dir = Path(pyfaceau.__file__).parent
+         weights_dir = pkg_dir / "weights"
+     except Exception:
+         # Fall back to the current directory
+         weights_dir = Path.cwd() / "weights"
+
+     weights_dir.mkdir(parents=True, exist_ok=True)
+     return weights_dir
+
+
+ def main():
+     """Download all required weights"""
+     print("PyFaceAU Weight Downloader")
+     print("=" * 60)
+
+     weights_dir = get_weights_dir()
+     print(f"Downloading weights to: {weights_dir}")
+     print()
+
+     # Download main weights
+     print("Downloading main model weights...")
+     for filename, description in REQUIRED_WEIGHTS.items():
+         output_path = weights_dir / filename
+
+         if output_path.exists():
+             print(f"✓ {filename} (already exists)")
+             continue
+
+         url = WEIGHTS_BASE_URL + filename
+         try:
+             download_file(url, output_path, desc=f"{filename} ({description})")
+             print(f"✓ Downloaded {filename}")
+         except Exception as e:
+             print(f"✗ Failed to download {filename}: {e}")
+             return 1
+
+     # Download AU predictors
+     print("\nDownloading AU predictor models...")
+     au_dir = weights_dir / "AU_predictors"
+     au_dir.mkdir(exist_ok=True)
+
+     for filename in AU_PREDICTOR_FILES:
+         output_path = au_dir / filename
+
+         if output_path.exists():
+             print(f"✓ {filename} (already exists)")
+             continue
+
+         url = WEIGHTS_BASE_URL + "AU_predictors/" + filename
+         try:
+             download_file(url, output_path, desc=filename)
+             print(f"✓ Downloaded {filename}")
+         except Exception as e:
+             print(f"✗ Failed to download {filename}: {e}")
+             return 1
+
+     print("\n" + "=" * 60)
+     print("✓ All weights downloaded successfully!")
+     print(f"Weights location: {weights_dir}")
+     print("\nYou can now use PyFaceAU:")
+     print("  from pyfaceau import FullPythonAUPipeline")
+
+     return 0
+
+
+ if __name__ == "__main__":
+     sys.exit(main())
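For completeness, a minimal sketch of invoking the downloader programmatically rather than via python -m (both functions are defined in the module above):

    from pyfaceau.download_weights import get_weights_dir, main

    print(get_weights_dir())    # package weights dir, created if missing
    raise SystemExit(main())    # returns 0 on success, 1 on any failed download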