pyfaceau 1.0.3__cp310-cp310-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyfaceau/__init__.py +19 -0
- pyfaceau/alignment/__init__.py +0 -0
- pyfaceau/alignment/calc_params.py +671 -0
- pyfaceau/alignment/face_aligner.py +352 -0
- pyfaceau/alignment/numba_calcparams_accelerator.py +244 -0
- pyfaceau/cython_histogram_median.cp310-win_amd64.pyd +0 -0
- pyfaceau/cython_rotation_update.cp310-win_amd64.pyd +0 -0
- pyfaceau/detectors/__init__.py +0 -0
- pyfaceau/detectors/pfld.py +128 -0
- pyfaceau/detectors/retinaface.py +352 -0
- pyfaceau/download_weights.py +134 -0
- pyfaceau/features/__init__.py +0 -0
- pyfaceau/features/histogram_median_tracker.py +335 -0
- pyfaceau/features/pdm.py +269 -0
- pyfaceau/features/triangulation.py +64 -0
- pyfaceau/parallel_pipeline.py +462 -0
- pyfaceau/pipeline.py +1083 -0
- pyfaceau/prediction/__init__.py +0 -0
- pyfaceau/prediction/au_predictor.py +434 -0
- pyfaceau/prediction/batched_au_predictor.py +269 -0
- pyfaceau/prediction/model_parser.py +337 -0
- pyfaceau/prediction/running_median.py +318 -0
- pyfaceau/prediction/running_median_fallback.py +200 -0
- pyfaceau/processor.py +270 -0
- pyfaceau/refinement/__init__.py +12 -0
- pyfaceau/refinement/svr_patch_expert.py +361 -0
- pyfaceau/refinement/targeted_refiner.py +362 -0
- pyfaceau/utils/__init__.py +0 -0
- pyfaceau/utils/cython_extensions/cython_histogram_median.c +35391 -0
- pyfaceau/utils/cython_extensions/cython_histogram_median.pyx +316 -0
- pyfaceau/utils/cython_extensions/cython_rotation_update.c +32262 -0
- pyfaceau/utils/cython_extensions/cython_rotation_update.pyx +211 -0
- pyfaceau/utils/cython_extensions/setup.py +47 -0
- pyfaceau-1.0.3.data/scripts/pyfaceau_gui.py +302 -0
- pyfaceau-1.0.3.dist-info/METADATA +466 -0
- pyfaceau-1.0.3.dist-info/RECORD +40 -0
- pyfaceau-1.0.3.dist-info/WHEEL +5 -0
- pyfaceau-1.0.3.dist-info/entry_points.txt +3 -0
- pyfaceau-1.0.3.dist-info/licenses/LICENSE +40 -0
- pyfaceau-1.0.3.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,462 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Parallel AU Extraction Pipeline - High Performance
|
|
4
|
+
|
|
5
|
+
This module extends the base pipeline with multiprocessing support to achieve
|
|
6
|
+
30-50 FPS by processing multiple frames simultaneously.
|
|
7
|
+
|
|
8
|
+
Key Features:
|
|
9
|
+
- Process 4-8 frames in parallel (scales with CPU cores)
|
|
10
|
+
- Maintains frame ordering for sequential running median updates
|
|
11
|
+
- Compatible with existing pipeline components
|
|
12
|
+
- Target: 30-50 FPS (6-10x speedup over sequential processing)
|
|
13
|
+
|
|
14
|
+
Usage:
|
|
15
|
+
from pyfaceau.parallel_pipeline import ParallelAUPipeline
|
|
16
|
+
|
|
17
|
+
pipeline = ParallelAUPipeline(
|
|
18
|
+
retinaface_model='weights/retinaface.onnx',
|
|
19
|
+
pfld_model='weights/pfld.onnx',
|
|
20
|
+
pdm_file='weights/pdm.txt',
|
|
21
|
+
au_models_dir='weights/AU_predictors',
|
|
22
|
+
triangulation_file='weights/tris.txt',
|
|
23
|
+
num_workers=6 # Number of parallel workers
|
|
24
|
+
)
|
|
25
|
+
|
|
26
|
+
results = pipeline.process_video('input.mp4', 'output.csv')
|
|
27
|
+
"""
|
|
28
|
+
|
|
29
|
+
import numpy as np
|
|
30
|
+
import pandas as pd
|
|
31
|
+
import cv2
|
|
32
|
+
from pathlib import Path
|
|
33
|
+
from typing import Dict, List, Optional, Tuple
|
|
34
|
+
import multiprocessing as mp
|
|
35
|
+
from multiprocessing import Pool, Manager
|
|
36
|
+
import queue
|
|
37
|
+
import time
|
|
38
|
+
|
|
39
|
+
# Import pipeline components
|
|
40
|
+
from pyfaceau.pipeline import FullPythonAUPipeline
|
|
41
|
+
import pyfhog
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
class ParallelAUPipeline:
    """
    High-performance parallel AU extraction pipeline.

    Processes multiple video frames simultaneously using multiprocessing
    to achieve 30-50 FPS throughput (6-10x faster than sequential).

    Architecture:
        1. Main process: reads frames from the video.
        2. Worker pool: extracts per-frame features in parallel
           (face detection, landmarks, alignment, HOG, geometric).
        3. Main process: updates the running median sequentially and
           predicts AUs, preserving frame order.
    """

    def __init__(
        self,
        retinaface_model: str,
        pfld_model: str,
        pdm_file: str,
        au_models_dir: str,
        triangulation_file: str,
        num_workers: int = 6,
        batch_size: int = 30,
        use_calc_params: bool = True,
        track_faces: bool = True,
        use_batched_predictor: bool = True,
        verbose: bool = True
    ):
        """
        Initialize parallel pipeline.

        Args:
            retinaface_model: Path to RetinaFace ONNX model
            pfld_model: Path to PFLD ONNX model
            pdm_file: Path to PDM shape model
            au_models_dir: Directory containing AU SVR models
            triangulation_file: Path to triangulation file
            num_workers: Number of parallel worker processes (default: 6)
            batch_size: Frames to process per batch (default: 30)
            use_calc_params: Use full CalcParams for pose estimation
            track_faces: Enable face tracking (skip detection on most frames)
            use_batched_predictor: Use optimized batched AU predictor (2-5x faster)
            verbose: Print progress messages
        """
        self.num_workers = num_workers
        self.batch_size = batch_size
        self.verbose = verbose

        # Parameters forwarded to each worker process (see _init_worker).
        # Workers are always quiet regardless of the main verbosity flag.
        self.init_params = {
            'retinaface_model': retinaface_model,
            'pfld_model': pfld_model,
            'pdm_file': pdm_file,
            'au_models_dir': au_models_dir,
            'triangulation_file': triangulation_file,
            'use_calc_params': use_calc_params,
            'track_faces': track_faces,
            'use_batched_predictor': use_batched_predictor,
            'verbose': False  # Disable worker verbosity
        }

        if self.verbose:
            print(f"Initializing parallel pipeline with {num_workers} workers...")
            print(f"Target FPS: {num_workers * 4.6:.1f} FPS (theoretical)")
            print("")

        # Main-process pipeline: used only for the sequential stages
        # (running-median updates and AU prediction).
        self.main_pipeline = FullPythonAUPipeline(
            retinaface_model=retinaface_model,
            pfld_model=pfld_model,
            pdm_file=pdm_file,
            au_models_dir=au_models_dir,
            triangulation_file=triangulation_file,
            use_calc_params=use_calc_params,
            use_coreml=False,  # Use CPU for multiprocessing (avoid CoreML threading issues)
            track_faces=track_faces,
            use_batched_predictor=use_batched_predictor,
            verbose=verbose
        )

        # Initialize components in main process
        self.main_pipeline._initialize_components()

    def process_video(
        self,
        video_path: str,
        output_csv: Optional[str] = None,
        max_frames: Optional[int] = None
    ) -> pd.DataFrame:
        """
        Process video with parallel frame processing.

        Args:
            video_path: Path to input video
            output_csv: Optional path to save results
            max_frames: Optional limit on frames to process

        Returns:
            DataFrame with AU predictions for all frames

        Raises:
            FileNotFoundError: If the video file does not exist.
            IOError: If OpenCV cannot open the video.
        """
        video_path = Path(video_path)
        if not video_path.exists():
            raise FileNotFoundError(f"Video not found: {video_path}")

        if self.verbose:
            print(f"Processing video: {video_path.name}")
            print("=" * 80)
            print("")

        # Open video; an unreadable file would otherwise fail silently and
        # produce an empty result set.
        cap = cv2.VideoCapture(str(video_path))
        if not cap.isOpened():
            raise IOError(f"Could not open video: {video_path}")

        fps = cap.get(cv2.CAP_PROP_FPS)
        if fps <= 0:
            # Some containers/streams report 0 FPS; fall back to a sane
            # default so timestamp computation (idx / fps) never divides by 0.
            fps = 30.0
        total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

        if max_frames:
            total_frames = min(total_frames, max_frames)

        if self.verbose:
            print(f"Video info:")
            print(f"  FPS: {fps:.2f}")
            print(f"  Total frames: {total_frames}")
            print(f"  Workers: {self.num_workers}")
            print(f"  Batch size: {self.batch_size}")
            print("")

        # Process video in batches
        results = []
        frame_idx = 0
        start_time = time.time()

        try:
            # Create worker pool; each worker builds its own pipeline once
            # via _init_worker and reuses it for every frame.
            with Pool(processes=self.num_workers, initializer=_init_worker, initargs=(self.init_params,)) as pool:
                while frame_idx < total_frames:
                    # Read batch of frames
                    batch_frames = []
                    batch_indices = []

                    for _ in range(self.batch_size):
                        if frame_idx >= total_frames:
                            break

                        ret, frame = cap.read()
                        if not ret:
                            break

                        batch_frames.append(frame)
                        batch_indices.append(frame_idx)
                        frame_idx += 1

                    if not batch_frames:
                        break

                    # Process batch in parallel
                    batch_start = time.time()

                    # Map frames to workers; pool.map preserves input order,
                    # which the sequential median update below relies on.
                    frame_data = [(idx, frame, fps) for idx, frame in zip(batch_indices, batch_frames)]
                    feature_results = pool.map(_process_frame_worker, frame_data)

                    batch_time = time.time() - batch_start

                    # Process results sequentially (running median + AU prediction)
                    for idx, frame_features in zip(batch_indices, feature_results):
                        if frame_features is None:
                            # Frame processing failed
                            results.append({
                                'frame': idx,
                                'timestamp': idx / fps,
                                'success': False
                            })
                            continue

                        # Extract features
                        hog_features = frame_features['hog_features']
                        geom_features = frame_features['geom_features']

                        # Update running median (must be sequential).
                        # Histogram is refreshed on odd frames only.
                        update_histogram = (idx % 2 == 1)
                        self.main_pipeline.running_median.update(
                            hog_features,
                            geom_features,
                            update_histogram=update_histogram
                        )
                        running_median = self.main_pipeline.running_median.get_combined_median()

                        # Predict AUs
                        au_results = self.main_pipeline._predict_aus(
                            hog_features,
                            geom_features,
                            running_median
                        )

                        # Store result
                        result = {
                            'frame': idx,
                            'timestamp': idx / fps,
                            'success': True
                        }
                        result.update(au_results)
                        results.append(result)

                    # Progress update
                    if self.verbose:
                        elapsed = time.time() - start_time
                        current_fps = frame_idx / elapsed if elapsed > 0 else 0
                        batch_fps = len(batch_frames) / batch_time if batch_time > 0 else 0
                        eta = (total_frames - frame_idx) / current_fps if current_fps > 0 else 0

                        print(f"Progress: {frame_idx}/{total_frames} frames "
                              f"({frame_idx/total_frames*100:.1f}%) - "
                              f"Batch FPS: {batch_fps:.1f}, "
                              f"Overall FPS: {current_fps:.1f}, "
                              f"ETA: {eta:.1f}s")

        finally:
            cap.release()

        # Convert to DataFrame
        df = pd.DataFrame(results)

        # Statistics; guard the empty case (zero frames read) where the
        # 'success' column does not exist and would raise KeyError.
        total_time = time.time() - start_time
        total_processed = int(df['success'].sum()) if not df.empty else 0
        overall_fps = total_processed / total_time if total_time > 0 else 0

        if self.verbose:
            print("")
            print("=" * 80)
            print("PROCESSING COMPLETE")
            print("=" * 80)
            print(f"Total frames: {len(df)}")
            print(f"Successful: {total_processed}")
            print(f"Failed: {len(df) - total_processed}")
            print(f"Total time: {total_time:.2f}s")
            print(f"Overall FPS: {overall_fps:.2f}")
            print(f"Speedup vs sequential (4.6 FPS): {overall_fps/4.6:.2f}x")
            print("")

        # Save to CSV if requested
        if output_csv:
            df.to_csv(output_csv, index=False)
            if self.verbose:
                print(f"Results saved to: {output_csv}")
                print("")

        return df
|
+
# Per-process pipeline instance. Populated once by _init_worker() in each
# pool worker, then reused by _process_frame_worker() for every frame that
# worker handles. Remains None in the main process.
_worker_pipeline = None
|
|
296
|
+
def _init_worker(init_params):
    """Build this worker process's own pipeline instance (runs once per worker)."""
    global _worker_pipeline

    # Model-path / configuration values that are passed straight through
    # from the parent process.
    forwarded = {
        key: init_params[key]
        for key in (
            'retinaface_model',
            'pfld_model',
            'pdm_file',
            'au_models_dir',
            'triangulation_file',
            'use_calc_params',
        )
    }

    # Workers always run on CPU (CoreML is disabled for multiprocessing),
    # never track faces (each frame is independent), and stay silent.
    _worker_pipeline = FullPythonAUPipeline(
        use_coreml=False,
        track_faces=False,
        use_batched_predictor=init_params.get('use_batched_predictor', True),
        verbose=False,
        **forwarded,
    )

    # Load detectors/models inside this process before the first frame arrives.
    _worker_pipeline._initialize_components()
|
318
|
+
def _process_frame_worker(frame_data):
    """
    Worker function to process a single frame.

    Extracts features (detection, landmarks, alignment, HOG, geometric)
    but does NOT update running median or predict AUs (done in main process).

    Args:
        frame_data: Tuple of (frame_idx, frame, fps)

    Returns:
        Dict with 'hog_features' and 'geom_features' (float32 arrays),
        or None if no face was found or any stage raised.
    """
    global _worker_pipeline

    frame_idx, frame, fps = frame_data

    try:
        # Step 1: Detect face (only the first detection is used)
        detections, _ = _worker_pipeline.face_detector.detect_faces(frame)
        if len(detections) == 0:
            return None

        det = detections[0]
        bbox = det[:4].astype(int)

        # Step 2: Detect landmarks
        landmarks_68, _ = _worker_pipeline.landmark_detector.detect_landmarks(frame, bbox)

        # Step 3: Estimate 3D pose
        if _worker_pipeline.use_calc_params and _worker_pipeline.calc_params:
            params_global, params_local = _worker_pipeline.calc_params.calc_params(
                landmarks_68.flatten()
            )
            # params_global layout: [scale, rx, ry, rz, tx, ty]; only the
            # in-plane rotation and translation are needed for alignment.
            rz = params_global[3]
            tx, ty = params_global[4:6]
        else:
            # Simplified pose: bbox centre, no roll, neutral shape params.
            tx = (bbox[0] + bbox[2]) / 2
            ty = (bbox[1] + bbox[3]) / 2
            rz = 0.0
            # NOTE(review): 34 assumed to match the PDM's local-mode count —
            # confirm against the loaded pdm_file.
            params_local = np.zeros(34)

        # Step 4: Align face
        aligned_face = _worker_pipeline.face_aligner.align_face(
            image=frame,
            landmarks_68=landmarks_68,
            pose_tx=tx,
            pose_ty=ty,
            p_rz=rz,
            apply_mask=True,
            triangulation=_worker_pipeline.triangulation
        )

        # Step 5: Extract HOG features
        hog_features = pyfhog.extract_fhog_features(aligned_face, cell_size=8)
        hog_features = hog_features.flatten().astype(np.float32)

        # Step 6: Extract geometric features
        geom_features = _worker_pipeline.pdm_parser.extract_geometric_features(params_local)
        geom_features = geom_features.astype(np.float32)

        return {
            'hog_features': hog_features,
            'geom_features': geom_features
        }

    except Exception:
        # Deliberate best-effort: the main process records a None result as
        # success=False, so swallow the error rather than crash the pool.
        return None
|
|
391
|
+
def main():
    """Command-line entry point for the parallel AU pipeline."""
    import argparse

    arg_parser = argparse.ArgumentParser(
        description="Parallel AU Extraction Pipeline (30-50 FPS)",
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )

    # Input/output and scheduling options.
    arg_parser.add_argument('--video', required=True, help='Input video file')
    arg_parser.add_argument('--output', help='Output CSV file (default: <video>_aus.csv)')
    arg_parser.add_argument('--max-frames', type=int, help='Maximum frames to process')
    arg_parser.add_argument('--workers', type=int, default=6, help='Number of parallel workers (default: 6)')
    arg_parser.add_argument('--batch-size', type=int, default=30, help='Frames per batch (default: 30)')

    # Model paths
    arg_parser.add_argument('--retinaface', default='weights/retinaface_mobilenet025_coreml.onnx')
    arg_parser.add_argument('--pfld', default='weights/pfld_cunjian.onnx')
    arg_parser.add_argument('--pdm', default='weights/In-the-wild_aligned_PDM_68.txt')
    arg_parser.add_argument('--au-models', default='weights/AU_predictors')
    arg_parser.add_argument('--triangulation', default='weights/tris_68_full.txt')

    opts = arg_parser.parse_args()

    # Derive a default output path next to the input video.
    if not opts.output:
        source = Path(opts.video)
        opts.output = str(source.parent / f"{source.stem}_parallel_aus.csv")

    # Initialize parallel pipeline
    try:
        pipeline = ParallelAUPipeline(
            retinaface_model=opts.retinaface,
            pfld_model=opts.pfld,
            pdm_file=opts.pdm,
            au_models_dir=opts.au_models,
            triangulation_file=opts.triangulation,
            num_workers=opts.workers,
            batch_size=opts.batch_size,
            verbose=True,
        )
    except Exception as e:
        print(f"Failed to initialize pipeline: {e}")
        return 1

    # Process video
    try:
        frame_table = pipeline.process_video(
            video_path=opts.video,
            output_csv=opts.output,
            max_frames=opts.max_frames,
        )

        print("=" * 80)
        print("SUCCESS")
        print("=" * 80)
        print(f"Processed {len(frame_table)} frames")
        print(f"Results saved to: {opts.output}")
        print("")

        return 0

    except Exception as e:
        print(f"Processing failed: {e}")
        import traceback
        traceback.print_exc()
        return 1
460
|
+
# Script entry point: process exit status mirrors main()'s return value
# (0 on success, 1 on failure).
if __name__ == '__main__':
    import sys
    sys.exit(main())