pyfaceau 1.3.5.tar.gz → 1.3.7.tar.gz
This diff shows the content changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only.
- {pyfaceau-1.3.5/pyfaceau.egg-info → pyfaceau-1.3.7}/PKG-INFO +1 -1
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau/pipeline.py +121 -98
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau/processor.py +31 -22
- {pyfaceau-1.3.5 → pyfaceau-1.3.7/pyfaceau.egg-info}/PKG-INFO +1 -1
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyproject.toml +1 -1
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/setup.py +1 -1
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/LICENSE +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/MANIFEST.in +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/README.md +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau/__init__.py +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau/alignment/__init__.py +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau/alignment/calc_params.py +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau/alignment/face_aligner.py +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau/alignment/numba_calcparams_accelerator.py +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau/alignment/paw.py +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau/config.py +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau/data/__init__.py +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau/data/hdf5_dataset.py +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau/data/quality_filter.py +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau/data/training_data_generator.py +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau/detectors/__init__.py +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau/detectors/extract_mtcnn_weights.py +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau/detectors/openface_mtcnn.py +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau/detectors/pfld.py +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau/detectors/pymtcnn_detector.py +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau/detectors/retinaface.py +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau/download_weights.py +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau/features/__init__.py +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau/features/histogram_median_tracker.py +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau/features/pdm.py +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau/features/triangulation.py +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau/nn/__init__.py +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau/nn/au_prediction_inference.py +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau/nn/au_prediction_net.py +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau/nn/fast_pipeline.py +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau/nn/landmark_pose_inference.py +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau/nn/landmark_pose_net.py +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau/nn/train_au_prediction.py +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau/nn/train_landmark_pose.py +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau/parallel_pipeline.py +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau/prediction/__init__.py +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau/prediction/au_predictor.py +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau/prediction/batched_au_predictor.py +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau/prediction/model_parser.py +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau/prediction/online_au_correction.py +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau/prediction/running_median.py +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau/prediction/running_median_fallback.py +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau/refinement/__init__.py +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau/refinement/pdm.py +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau/refinement/svr_patch_expert.py +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau/refinement/targeted_refiner.py +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau/utils/__init__.py +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau/utils/cython_extensions/cython_histogram_median.pyx +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau/utils/cython_extensions/cython_rotation_update.pyx +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau/utils/cython_extensions/setup.py +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau.egg-info/SOURCES.txt +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau.egg-info/dependency_links.txt +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau.egg-info/entry_points.txt +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau.egg-info/not-zip-safe +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau.egg-info/requires.txt +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau.egg-info/top_level.txt +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau_gui.py +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/requirements.txt +0 -0
- {pyfaceau-1.3.5 → pyfaceau-1.3.7}/setup.cfg +0 -0
{pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau/pipeline.py

@@ -22,7 +22,7 @@ import numpy as np
 import pandas as pd
 import cv2
 from pathlib import Path
-from typing import Dict, List, Optional, Tuple
+from typing import Callable, Dict, List, Optional, Tuple
 import argparse
 import sys
 import time
@@ -75,11 +75,19 @@ except ImportError:
     sys.path.insert(0, str(pyfhog_src_path))
     import pyfhog
 else:
-    print("Error: pyfhog not found. Please install it:")
-    print("  cd ../pyfhog && pip install -e .")
+    safe_print("Error: pyfhog not found. Please install it:")
+    safe_print("  cd ../pyfhog && pip install -e .")
     sys.exit(1)


+def safe_print(*args, **kwargs):
+    """Print wrapper that handles BrokenPipeError in GUI subprocess contexts."""
+    try:
+        print(*args, **kwargs)
+    except (BrokenPipeError, IOError):
+        pass  # Stdout disconnected (e.g., GUI subprocess terminated)
+
+
 def get_video_rotation(video_path: str) -> int:
     """
     Get video rotation from metadata using ffprobe.
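The `safe_print` helper added here guards against a GUI front-end closing the worker's stdout pipe mid-run, after which any bare `print` raises `BrokenPipeError`. A standalone sketch of that failure mode (illustrative only, not part of the package):

```python
import subprocess
import sys

# A child that keeps printing, as the pipeline does in verbose mode.
child_code = (
    "for i in range(100000):\n"
    "    print('x' * 80)\n"
)

# Parent reads one line, then closes the pipe -- like a GUI that was
# terminated while its worker subprocess was still producing output.
child = subprocess.Popen([sys.executable, "-c", child_code],
                         stdout=subprocess.PIPE)
child.stdout.readline()
child.stdout.close()
child.wait()
# The child dies with an unhandled BrokenPipeError traceback; wrapping its
# prints in safe_print (try/except BrokenPipeError) would let it keep going.
```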
@@ -284,11 +292,11 @@ class FullPythonAUPipeline:
         if self.verbose:
             thread_name = threading.current_thread().name
             is_main = threading.current_thread() == threading.main_thread()
-            print("=" * 80)
-            print("INITIALIZING COMPONENTS")
-            print(f"Thread: {thread_name} (main={is_main})")
-            print("=" * 80)
-            print("")
+            safe_print("=" * 80)
+            safe_print("INITIALIZING COMPONENTS")
+            safe_print(f"Thread: {thread_name} (main={is_main})")
+            safe_print("=" * 80)
+            safe_print("")

         # Get initialization parameters
         mtcnn_backend = self._init_params['mtcnn_backend']
@@ -301,8 +309,8 @@ class FullPythonAUPipeline:

         # Component 1: Face Detection (PyMTCNN with multi-backend support)
         if self.verbose:
-            print("[1/8] Loading face detector (PyMTCNN)...")
-            print(f"  Backend: {mtcnn_backend}")
+            safe_print("[1/8] Loading face detector (PyMTCNN)...")
+            safe_print(f"  Backend: {mtcnn_backend}")

         if not PYMTCNN_AVAILABLE:
             raise ImportError(
@@ -320,15 +328,15 @@ class FullPythonAUPipeline:
         )
         if self.verbose:
             backend_info = self.face_detector.get_backend_info()
-            print(f"  Active backend: {backend_info}")
-            print("Face detector loaded\n")
+            safe_print(f"  Active backend: {backend_info}")
+            safe_print("Face detector loaded\n")

         # Component 2: Landmark Detection (pyclnf CLNF with GPU acceleration)
         if self.verbose:
-            print("[2/8] Loading CLNF landmark detector (pyclnf)...")
-            print(f"  Max iterations: {max_clnf_iterations}")
-            print(f"  Convergence threshold: {clnf_convergence_threshold} pixels")
-            print(f"  GPU enabled: {CLNF_CONFIG.get('use_gpu', False)}")
+            safe_print("[2/8] Loading CLNF landmark detector (pyclnf)...")
+            safe_print(f"  Max iterations: {max_clnf_iterations}")
+            safe_print(f"  Convergence threshold: {clnf_convergence_threshold} pixels")
+            safe_print(f"  GPU enabled: {CLNF_CONFIG.get('use_gpu', False)}")

         # Lazy import to avoid circular import (pyfaceau ↔ pyclnf)
         from pyclnf import CLNF
@@ -347,14 +355,14 @@ class FullPythonAUPipeline:
         )

         if self.verbose:
-            print(f"CLNF detector loaded\n")
+            safe_print(f"CLNF detector loaded\n")

         # Component 3: PDM Parser (moved before CLNF to support PDM enforcement)
         if self.verbose:
-            print("[3/8] Loading PDM shape model...")
+            safe_print("[3/8] Loading PDM shape model...")
         self.pdm_parser = PDMParser(pdm_file)
         if self.verbose:
-            print(f"PDM loaded: {self.pdm_parser.mean_shape.shape[0]//3} landmarks\n")
+            safe_print(f"PDM loaded: {self.pdm_parser.mean_shape.shape[0]//3} landmarks\n")

         # Note: CalcParams is no longer used for geometric features
         # pyclnf's optimized params are used instead (see GEOMETRIC_FEATURES_BUG.md)
@@ -363,28 +371,28 @@ class FullPythonAUPipeline:

         # Component 4: Face Aligner
         if self.verbose:
-            print("[4/8] Initializing face aligner...")
+            safe_print("[4/8] Initializing face aligner...")
         self.face_aligner = OpenFace22FaceAligner(
             pdm_file=pdm_file,
             sim_scale=0.7,
             output_size=(112, 112)
         )
         if self.verbose:
-            print("Face aligner initialized\n")
+            safe_print("Face aligner initialized\n")

         # Note: CLNF landmark detector is already initialized above (Component 2)
         # No separate refiner needed - CLNF does full detection from PDM mean shape

         # Component 5: Triangulation
         if self.verbose:
-            print("[5/8] Loading triangulation...")
+            safe_print("[5/8] Loading triangulation...")
         self.triangulation = TriangulationParser(triangulation_file)
         if self.verbose:
-            print(f"Triangulation loaded: {len(self.triangulation.triangles)} triangles\n")
+            safe_print(f"Triangulation loaded: {len(self.triangulation.triangles)} triangles\n")

         # Component 6: AU Models
         if self.verbose:
-            print("[6/8] Loading AU SVR models...")
+            safe_print("[6/8] Loading AU SVR models...")
         model_parser = OF22ModelParser(au_models_dir)
         self.au_models = model_parser.load_all_models(
             use_recommended=True,
@@ -392,30 +400,30 @@ class FullPythonAUPipeline:
             verbose=self.verbose
         )
         if self.verbose:
-            print(f"Loaded {len(self.au_models)} AU models")
+            safe_print(f"Loaded {len(self.au_models)} AU models")

         # Initialize batched predictor if enabled
         if self.use_batched_predictor:
             self.batched_au_predictor = BatchedAUPredictor(self.au_models)
             if self.verbose:
-                print(f"Batched AU predictor enabled (2-5x faster)")
+                safe_print(f"Batched AU predictor enabled (2-5x faster)")
         if self.verbose:
-            print("")
+            safe_print("")

         # Component 7: Running Median Tracker
         if self.verbose:
-            print("[7/8] Initializing running median tracker...")
+            safe_print("[7/8] Initializing running median tracker...")
         # Use locked configuration from config.py (matches C++ OpenFace)
         self.running_median = DualHistogramMedianTracker(**RUNNING_MEDIAN_CONFIG)
         if self.verbose:
             if USING_CYTHON:
-                print("Running median tracker initialized (Cython-optimized, 260x faster)\n")
+                safe_print("Running median tracker initialized (Cython-optimized, 260x faster)\n")
             else:
-                print("Running median tracker initialized (Python version)\n")
+                safe_print("Running median tracker initialized (Python version)\n")

         # Component 8: Online AU Correction (C++ CorrectOnlineAUs equivalent)
         if self.verbose:
-            print("[8/9] Initializing online AU correction...")
+            safe_print("[8/9] Initializing online AU correction...")
         # Get AU names from loaded models
         au_names = list(self.au_models.keys())
         self.online_au_correction = OnlineAUCorrection(
@@ -428,15 +436,15 @@ class FullPythonAUPipeline:
             clip_values=True
         )
         if self.verbose:
-            print(f"Online AU correction initialized for {len(au_names)} AUs\n")
+            safe_print(f"Online AU correction initialized for {len(au_names)} AUs\n")

         # Component 9: PyFHOG
         if self.verbose:
-            print("[9/9] PyFHOG ready for HOG extraction")
-            print("")
-            print("All components initialized successfully")
-            print("=" * 80)
-            print("")
+            safe_print("[9/9] PyFHOG ready for HOG extraction")
+            safe_print("")
+            safe_print("All components initialized successfully")
+            safe_print("=" * 80)
+            safe_print("")

         self._components_initialized = True

@@ -524,7 +532,8 @@ class FullPythonAUPipeline:
         self,
         video_path: str,
         output_csv: Optional[str] = None,
-        max_frames: Optional[int] = None
+        max_frames: Optional[int] = None,
+        progress_callback: Optional[Callable[[int, int, float], None]] = None
     ) -> pd.DataFrame:
         """
         Process a video and extract AUs for all frames
@@ -533,6 +542,8 @@ class FullPythonAUPipeline:
             video_path: Path to input video
             output_csv: Optional path to save CSV results
             max_frames: Optional limit on frames to process (for testing)
+            progress_callback: Optional callback function(current, total, fps)
+                for progress updates to GUI

         Returns:
             DataFrame with columns: frame, timestamp, success, AU01_r, AU02_r, ...
@@ -541,14 +552,15 @@ class FullPythonAUPipeline:
         self.stored_features = []

         # Use direct processing implementation
-        return self._process_video_impl(video_path, output_csv, max_frames)
+        return self._process_video_impl(video_path, output_csv, max_frames, progress_callback)


     def _process_video_impl(
         self,
         video_path: str,
         output_csv: Optional[str] = None,
-        max_frames: Optional[int] = None
+        max_frames: Optional[int] = None,
+        progress_callback: Optional[Callable[[int, int, float], None]] = None
     ) -> pd.DataFrame:
         """Internal implementation of video processing"""

@@ -560,14 +572,14 @@ class FullPythonAUPipeline:
             raise FileNotFoundError(f"Video not found: {video_path}")

         if self.verbose:
-            print(f"Processing video: {video_path.name}")
-            print("=" * 80)
-            print("")
+            safe_print(f"Processing video: {video_path.name}")
+            safe_print("=" * 80)
+            safe_print("")

         # Detect video rotation from metadata (important for mobile videos)
         rotation = get_video_rotation(str(video_path))
         if self.verbose and rotation != 0:
-            print(f"Detected video rotation: {rotation}°")
+            safe_print(f"Detected video rotation: {rotation}°")

         # Open video
         cap = cv2.VideoCapture(str(video_path))
@@ -578,13 +590,13 @@ class FullPythonAUPipeline:
             total_frames = min(total_frames, max_frames)

         if self.verbose:
-            print(f"Video info:")
-            print(f"  FPS: {fps:.2f}")
-            print(f"  Total frames: {total_frames}")
-            print(f"  Duration: {total_frames/fps:.2f} seconds")
+            safe_print(f"Video info:")
+            safe_print(f"  FPS: {fps:.2f}")
+            safe_print(f"  Total frames: {total_frames}")
+            safe_print(f"  Duration: {total_frames/fps:.2f} seconds")
             if rotation != 0:
-                print(f"  Rotation: {rotation}° (will be corrected)")
-            print("")
+                safe_print(f"  Rotation: {rotation}° (will be corrected)")
+            safe_print("")

         # Results storage
         results = []
@@ -593,6 +605,7 @@ class FullPythonAUPipeline:
         # Statistics
         total_processed = 0
         total_failed = 0
+        processing_start_time = time.time()

         try:
             while True:
@@ -618,9 +631,19 @@ class FullPythonAUPipeline:
                 # Progress update
                 if self.verbose and (frame_idx + 1) % 10 == 0:
                     progress = (frame_idx + 1) / total_frames * 100
-                    print(f"Progress: {frame_idx + 1}/{total_frames} frames ({progress:.1f}%) - "
+                    safe_print(f"Progress: {frame_idx + 1}/{total_frames} frames ({progress:.1f}%) - "
                           f"Success: {total_processed}, Failed: {total_failed}", flush=True)

+                # GUI progress callback (called every frame for smooth updates)
+                if progress_callback is not None:
+                    try:
+                        # Calculate actual processing FPS (not video fps)
+                        elapsed = time.time() - processing_start_time
+                        actual_fps = (frame_idx + 1) / elapsed if elapsed > 0 else 0.0
+                        progress_callback(frame_idx + 1, total_frames, actual_fps)
+                    except Exception:
+                        pass  # Don't let callback errors stop processing
+
                 frame_idx += 1

         finally:
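For reference, a runnable sketch of the `(current, total, fps)` callback contract introduced above; the loop below simply simulates the pipeline invoking the callback once per frame with elapsed-time throughput, the way `_process_video_impl` now does:

```python
import time

def report_progress(current: int, total: int, fps: float) -> None:
    # Overwrite one status line; a GUI would update a progress bar instead.
    pct = 100.0 * current / total if total else 0.0
    print(f"\r{current}/{total} frames ({pct:.1f}%) @ {fps:.1f} fps",
          end="", flush=True)

start = time.time()
total_frames = 50
for frame_idx in range(total_frames):   # stand-in for the per-frame loop
    time.sleep(0.01)                    # stand-in for actual frame processing
    elapsed = time.time() - start
    report_progress(frame_idx + 1, total_frames,
                    (frame_idx + 1) / elapsed if elapsed > 0 else 0.0)
print()
```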
@@ -632,25 +655,25 @@ class FullPythonAUPipeline:
         # Apply post-processing (cutoff adjustment, temporal smoothing)
         # This is CRITICAL for dynamic AU accuracy!
         if self.verbose:
-            print("\nApplying post-processing (cutoff adjustment, temporal smoothing)...")
+            safe_print("\nApplying post-processing (cutoff adjustment, temporal smoothing)...")
         df = self.finalize_predictions(df)

         if self.verbose:
-            print("")
-            print("=" * 80)
-            print("PROCESSING COMPLETE")
-            print("=" * 80)
-            print(f"Total frames processed: {total_processed}")
-            print(f"Failed frames: {total_failed}")
-            print(f"Success rate: {total_processed/(total_processed+total_failed)*100:.1f}%")
-            print("")
+            safe_print("")
+            safe_print("=" * 80)
+            safe_print("PROCESSING COMPLETE")
+            safe_print("=" * 80)
+            safe_print(f"Total frames processed: {total_processed}")
+            safe_print(f"Failed frames: {total_failed}")
+            safe_print(f"Success rate: {total_processed/(total_processed+total_failed)*100:.1f}%")
+            safe_print("")

         # Save to CSV if requested
         if output_csv:
             df.to_csv(output_csv, index=False)
             if self.verbose:
-                print(f"Results saved to: {output_csv}")
-                print("")
+                safe_print(f"Results saved to: {output_csv}")
+                safe_print("")

         return df

@@ -704,7 +727,7 @@ class FullPythonAUPipeline:
         if self.track_faces and self.cached_bbox is not None:
             # Try using cached bbox (skip expensive PyMTCNN!)
             if self.verbose and frame_idx < 3:
-                print(f"[Frame {frame_idx}] Step 1: Using cached bbox (tracking mode)")
+                safe_print(f"[Frame {frame_idx}] Step 1: Using cached bbox (tracking mode)")
             bbox = self.cached_bbox
             need_detection = False
             self.frames_since_detection += 1
@@ -712,10 +735,10 @@ class FullPythonAUPipeline:
         if need_detection or bbox is None:
             # First frame OR previous tracking failed - run PyMTCNN
             if self.verbose and frame_idx < 3:
-                print(f"[Frame {frame_idx}] Step 1: Detecting face with {self.face_detector.backend}...")
+                safe_print(f"[Frame {frame_idx}] Step 1: Detecting face with {self.face_detector.backend}...")
             detections, _ = self.face_detector.detect_faces(frame)
             if self.verbose and frame_idx < 3:
-                print(f"[Frame {frame_idx}] Step 1: Found {len(detections)} faces")
+                safe_print(f"[Frame {frame_idx}] Step 1: Found {len(detections)} faces")

             if len(detections) == 0:
                 # No face detected - clear cache
@@ -750,7 +773,7 @@ class FullPythonAUPipeline:
         # Step 2: Detect landmarks using CLNF (OpenFace approach)
         t0 = time.time() if debug_info is not None else None
         if self.verbose and frame_idx < 3:
-            print(f"[Frame {frame_idx}] Step 2: Detecting landmarks with CLNF...")
+            safe_print(f"[Frame {frame_idx}] Step 2: Detecting landmarks with CLNF...")

         try:
             # Convert bbox from [x1, y1, x2, y2] to [x, y, width, height] for pyclnf
@@ -767,7 +790,7 @@ class FullPythonAUPipeline:
             num_iterations = info['iterations']

             if self.verbose and frame_idx < 3:
-                print(f"[Frame {frame_idx}] Step 2: Got {len(landmarks_68)} landmarks (CLNF converged: {converged}, iterations: {num_iterations})")
+                safe_print(f"[Frame {frame_idx}] Step 2: Got {len(landmarks_68)} landmarks (CLNF converged: {converged}, iterations: {num_iterations})")

             if debug_info is not None:
                 debug_info['landmark_detection'] = {
@@ -781,7 +804,7 @@ class FullPythonAUPipeline:
             # Landmark detection failed with cached bbox - re-run face detection
             if self.track_faces and not need_detection:
                 if self.verbose and frame_idx < 3:
-                    print(f"[Frame {frame_idx}] Step 2: Landmark detection failed with cached bbox, re-detecting face...")
+                    safe_print(f"[Frame {frame_idx}] Step 2: Landmark detection failed with cached bbox, re-detecting face...")
                 self.detection_failures += 1
                 self.cached_bbox = None

@@ -822,7 +845,7 @@ class FullPythonAUPipeline:
         # See GEOMETRIC_FEATURES_BUG.md for details
         t0 = time.time() if debug_info is not None else None
         if self.verbose and frame_idx < 3:
-            print(f"[Frame {frame_idx}] Step 3: Extracting pose from pyclnf params...")
+            safe_print(f"[Frame {frame_idx}] Step 3: Extracting pose from pyclnf params...")

         if 'params' in info:
             # Use params from pyclnf CLNF optimization (CORRECT approach)
@@ -856,7 +879,7 @@ class FullPythonAUPipeline:
         # Step 4: Align face
         t0 = time.time() if debug_info is not None else None
         if self.verbose and frame_idx < 3:
-            print(f"[Frame {frame_idx}] Step 4: Aligning face...")
+            safe_print(f"[Frame {frame_idx}] Step 4: Aligning face...")
         aligned_face = self.face_aligner.align_face(
             image=frame,
             landmarks_68=landmarks_68,
@@ -867,7 +890,7 @@ class FullPythonAUPipeline:
             triangulation=self.triangulation
         )
         if self.verbose and frame_idx < 3:
-            print(f"[Frame {frame_idx}] Step 4: Aligned face shape: {aligned_face.shape}")
+            safe_print(f"[Frame {frame_idx}] Step 4: Aligned face shape: {aligned_face.shape}")

         if debug_info is not None:
             debug_info['alignment'] = {
@@ -878,7 +901,7 @@ class FullPythonAUPipeline:
         # Step 5: Extract HOG features
         t0 = time.time() if debug_info is not None else None
         if self.verbose and frame_idx < 3:
-            print(f"[Frame {frame_idx}] Step 5: Extracting HOG features...")
+            safe_print(f"[Frame {frame_idx}] Step 5: Extracting HOG features...")
         hog_features = pyfhog.extract_fhog_features(
             aligned_face,
             cell_size=8
@@ -886,7 +909,7 @@ class FullPythonAUPipeline:
         # pyfhog 0.1.4+ outputs in OpenFace-compatible format (no transpose needed)
         # The HOG flattening order matches C++ OpenFace Face_utils.cpp line 265
         if self.verbose and frame_idx < 3:
-            print(f"[Frame {frame_idx}] Step 5: HOG features shape: {hog_features.shape}")
+            safe_print(f"[Frame {frame_idx}] Step 5: HOG features shape: {hog_features.shape}")

         if debug_info is not None:
             debug_info['hog_extraction'] = {
@@ -897,10 +920,10 @@ class FullPythonAUPipeline:
         # Step 6: Extract geometric features
         t0 = time.time() if debug_info is not None else None
         if self.verbose and frame_idx < 3:
-            print(f"[Frame {frame_idx}] Step 6: Extracting geometric features...")
+            safe_print(f"[Frame {frame_idx}] Step 6: Extracting geometric features...")
         geom_features = self.pdm_parser.extract_geometric_features(params_local)
         if self.verbose and frame_idx < 3:
-            print(f"[Frame {frame_idx}] Step 6: Geometric features shape: {geom_features.shape}")
+            safe_print(f"[Frame {frame_idx}] Step 6: Geometric features shape: {geom_features.shape}")

         # Ensure float32 for Cython compatibility
         hog_features = hog_features.astype(np.float32)
@@ -915,14 +938,14 @@ class FullPythonAUPipeline:
         # Step 7: Update running median
         t0 = time.time() if debug_info is not None else None
         if self.verbose and frame_idx < 3:
-            print(f"[Frame {frame_idx}] Step 7: Updating running median...")
+            safe_print(f"[Frame {frame_idx}] Step 7: Updating running median...")
         # C++ increments frames_tracking BEFORE the check, so frame 0 → counter=1 → update
         # To match: update on frames 0, 2, 4, 6... (even frames)
         update_histogram = (frame_idx % 2 == 0)  # Match C++ timing
         self.running_median.update(hog_features, geom_features, update_histogram=update_histogram)
         running_median = self.running_median.get_combined_median()
         if self.verbose and frame_idx < 3:
-            print(f"[Frame {frame_idx}] Step 7: Running median shape: {running_median.shape}")
+            safe_print(f"[Frame {frame_idx}] Step 7: Running median shape: {running_median.shape}")

         if debug_info is not None:
             debug_info['running_median'] = {
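The cadence noted in the comments can be checked in isolation; under the `frame_idx % 2 == 0` rule, frames 0, 2, 4, ... update the histogram, matching C++ OpenFace, which increments `frames_tracking` before testing it:

```python
# Frame 0 -> C++ counter 1 -> update; frame 1 -> counter 2 -> skip; and so on.
for frame_idx in range(6):
    update_histogram = (frame_idx % 2 == 0)
    print(f"frame {frame_idx}: {'update' if update_histogram else 'skip'}")
```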
@@ -938,7 +961,7 @@ class FullPythonAUPipeline:
         # Step 8: Predict AUs
         t0 = time.time() if debug_info is not None else None
         if self.verbose and frame_idx < 3:
-            print(f"[Frame {frame_idx}] Step 8: Predicting AUs...")
+            safe_print(f"[Frame {frame_idx}] Step 8: Predicting AUs...")
         au_results = self._predict_aus(
             hog_features,
             geom_features,
@@ -974,7 +997,7 @@ class FullPythonAUPipeline:

         except Exception as e:
             if self.verbose:
-                print(f"Warning: Frame {frame_idx} failed: {e}")
+                safe_print(f"Warning: Frame {frame_idx} failed: {e}")

         return result

@@ -1049,9 +1072,9 @@ class FullPythonAUPipeline:
             DataFrame with finalized AU predictions
         """
         if self.verbose:
-            print("")
-            print("Applying post-processing...")
-            print("  [1/3] Two-pass median correction...")
+            safe_print("")
+            safe_print("Applying post-processing...")
+            safe_print("  [1/3] Two-pass median correction...")

         # Two-pass reprocessing: Re-predict AUs for early frames using final running median
         # This fixes systematic baseline offset from immature running median in early frames
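A toy illustration (numbers invented) of why the two-pass step helps, under the simplified view that AU inputs are median-centered features: frames scored while the running median is still maturing carry a baseline offset, which re-centering with the final median removes:

```python
import numpy as np

features = np.array([1.00, 1.10, 0.90, 1.00, 1.05])          # per-frame feature
maturing_median = np.array([0.00, 0.50, 0.80, 0.95, 1.00])   # grows over time

pass1 = features - maturing_median      # early frames biased high
pass2 = features - maturing_median[-1]  # re-centered with the final median
print("first pass :", pass1)
print("second pass:", pass2)
```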
@@ -1059,7 +1082,7 @@ class FullPythonAUPipeline:
             final_median = self.running_median.get_combined_median()

             if self.verbose:
-                print(f"  Re-predicting {len(self.stored_features)} early frames with final median...")
+                safe_print(f"  Re-predicting {len(self.stored_features)} early frames with final median...")

             # Re-predict AUs for stored frames
             for frame_idx, hog_features, geom_features in self.stored_features:
@@ -1074,13 +1097,13 @@ class FullPythonAUPipeline:
             self.stored_features = []

             if self.verbose:
-                print(f"  Two-pass correction complete")
+                safe_print(f"  Two-pass correction complete")
         else:
             if self.verbose:
-                print("  (No stored features - skipping)")
+                safe_print("  (No stored features - skipping)")

         if self.verbose:
-            print("  [2/3] Cutoff adjustment...")
+            safe_print("  [2/3] Cutoff adjustment...")

         # Apply cutoff adjustment for dynamic models
         au_cols = [col for col in df.columns if col.startswith('AU') and col.endswith('_r')]
@@ -1136,7 +1159,7 @@ class FullPythonAUPipeline:
             df[au_col] = np.clip(au_values - offset, 0.0, 5.0)

         if self.verbose:
-            print("  [3/3] Temporal smoothing...")
+            safe_print("  [3/3] Temporal smoothing...")

         # Apply 3-frame moving average
         for au_col in au_cols:
@@ -1144,7 +1167,7 @@ class FullPythonAUPipeline:
             df[au_col] = smoothed

         if self.verbose:
-            print("Post-processing complete")
+            safe_print("Post-processing complete")

         return df

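The `[3/3]` step applies a 3-frame moving average; an equivalent standalone sketch (the edge handling here is one plausible choice, not necessarily the package's):

```python
import numpy as np
import pandas as pd

def smooth_3frame(values: np.ndarray) -> np.ndarray:
    # Centered 3-frame window; min_periods=1 keeps the first/last frames.
    return (pd.Series(values)
            .rolling(window=3, center=True, min_periods=1)
            .mean()
            .to_numpy())

au = np.array([0.0, 1.2, 0.4, 0.9, 3.0, 0.2])
print(smooth_3frame(au))  # single-frame spikes are damped
```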
@@ -1205,7 +1228,7 @@ Examples:
             verbose=True
         )
     except Exception as e:
-        print(f"Failed to initialize pipeline: {e}")
+        safe_print(f"Failed to initialize pipeline: {e}")
         return 1

     # Process video
@@ -1222,28 +1245,28 @@ Examples:
         # Save final results
         df.to_csv(args.output, index=False)

-        print("=" * 80)
-        print("SUCCESS")
-        print("=" * 80)
-        print(f"Processed {len(df)} frames")
-        print(f"Results saved to: {args.output}")
-        print("")
+        safe_print("=" * 80)
+        safe_print("SUCCESS")
+        safe_print("=" * 80)
+        safe_print(f"Processed {len(df)} frames")
+        safe_print(f"Results saved to: {args.output}")
+        safe_print("")

         # Show AU statistics
         au_cols = [col for col in df.columns if col.startswith('AU') and col.endswith('_r')]
         if au_cols:
-            print("AU Statistics:")
+            safe_print("AU Statistics:")
             for au_col in sorted(au_cols):
                 success_frames = df[df['success'] == True]
                 if len(success_frames) > 0:
                     mean_val = success_frames[au_col].mean()
                     max_val = success_frames[au_col].max()
-                    print(f"  {au_col}: mean={mean_val:.3f}, max={max_val:.3f}")
+                    safe_print(f"  {au_col}: mean={mean_val:.3f}, max={max_val:.3f}")

         return 0

     except Exception as e:
-        print(f"Processing failed: {e}")
+        safe_print(f"Processing failed: {e}")
         import traceback
         traceback.print_exc()
         return 1
{pyfaceau-1.3.5 → pyfaceau-1.3.7}/pyfaceau/processor.py

@@ -14,6 +14,14 @@ from typing import Optional, Callable
 from .pipeline import FullPythonAUPipeline


+def safe_print(*args, **kwargs):
+    """Print wrapper that handles BrokenPipeError in GUI subprocess contexts."""
+    try:
+        print(*args, **kwargs)
+    except (BrokenPipeError, IOError):
+        pass  # Stdout disconnected (e.g., GUI subprocess terminated)
+
+
 class OpenFaceProcessor:
     """
     OpenFace 2.2-compatible AU extraction processor.
@@ -74,7 +82,7 @@ class OpenFaceProcessor:
         weights_dir = Path(weights_dir)

         if self.verbose:
-            print("Initializing PyFaceAU (OpenFace 2.2 Python replacement)...")
+            safe_print("Initializing PyFaceAU (OpenFace 2.2 Python replacement)...")

         # Initialize the PyFaceAU pipeline (OpenFace-compatible: PyMTCNN → CLNF → AU)
         self.pipeline = FullPythonAUPipeline(
@@ -88,10 +96,10 @@ class OpenFaceProcessor:
         )

         if self.verbose:
-            print(f"  PyFaceAU initialized")
-            print(f"  CLNF refinement: {'Enabled' if use_clnf_refinement else 'Disabled'}")
-            print(f"  Expected accuracy: r > 0.92 (OpenFace 2.2 correlation)")
-            print()
+            safe_print(f"  PyFaceAU initialized")
+            safe_print(f"  CLNF refinement: {'Enabled' if use_clnf_refinement else 'Disabled'}")
+            safe_print(f"  Expected accuracy: r > 0.92 (OpenFace 2.2 correlation)")
+            safe_print()

     def process_video(
         self,
@@ -118,7 +126,7 @@ class OpenFaceProcessor:
         output_csv_path = Path(output_csv_path)

         if self.verbose:
-            print(f"Processing: {video_path.name}")
+            safe_print(f"Processing: {video_path.name}")

         # Ensure output directory exists
         output_csv_path.parent.mkdir(parents=True, exist_ok=True)
@@ -128,24 +136,25 @@ class OpenFaceProcessor:
             df = self.pipeline.process_video(
                 video_path=str(video_path),
                 output_csv=str(output_csv_path),
-                max_frames=None
+                max_frames=None,
+                progress_callback=progress_callback
             )

             success_count = df['success'].sum()

             if self.verbose:
                 total_frames = len(df)
-                print(f"  Processed {success_count}/{total_frames} frames successfully")
+                safe_print(f"  Processed {success_count}/{total_frames} frames successfully")
                 if success_count < total_frames:
                     failed = total_frames - success_count
-                    print(f"  {failed} frames failed (no face detected)")
-                print(f"  Output: {output_csv_path}")
+                    safe_print(f"  {failed} frames failed (no face detected)")
+                safe_print(f"  Output: {output_csv_path}")

             return int(success_count)

         except Exception as e:
             if self.verbose:
-                print(f"  Error processing video: {e}")
+                safe_print(f"  Error processing video: {e}")
             raise

     def clear_cache(self):
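Taken together with the pipeline change, `OpenFaceProcessor.process_video` now forwards a caller-supplied callback straight through to `FullPythonAUPipeline.process_video`. A hedged usage sketch (constructor arguments assumed, since they are not shown in this diff):

```python
from pyfaceau.processor import OpenFaceProcessor

def on_progress(current: int, total: int, fps: float) -> None:
    print(f"{current}/{total} frames @ {fps:.1f} fps")

# Assumes OpenFaceProcessor() accepts these keyword defaults; adjust
# weights_dir and similar options to match your installation.
processor = OpenFaceProcessor(verbose=True)
processor.process_video("clip.mp4", "clip_aus.csv",
                        progress_callback=on_progress)
```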
@@ -204,14 +213,14 @@ def process_videos(
             output_dir='/path/to/output',
             use_clnf_refinement=True
         )
-        print(f"Processed {count} videos")
+        safe_print(f"Processed {count} videos")
        ```
    """
    directory_path = Path(directory_path)

    # Check if directory exists
    if not directory_path.is_dir():
-        print(f"Error: Directory '{directory_path}' does not exist.")
+        safe_print(f"Error: Directory '{directory_path}' does not exist.")
        return 0

    # Determine output directory
@@ -223,7 +232,7 @@ def process_videos(
        output_dir = Path(output_dir)

    output_dir.mkdir(parents=True, exist_ok=True)
-    print(f"Output directory: {output_dir}")
+    safe_print(f"Output directory: {output_dir}")

    # Initialize processor
    processor = OpenFaceProcessor(**processor_kwargs)
@@ -237,24 +246,24 @@ def process_videos(
    if specific_files:
        # Process only the specific files
        files_to_process = [Path(f) for f in specific_files]
-        print(f"Processing {len(files_to_process)} specific files from current session.")
+        safe_print(f"Processing {len(files_to_process)} specific files from current session.")
    else:
        # Process all eligible files in the directory
        files_to_process = list(directory_path.iterdir())
-        print(f"Processing all eligible files in {directory_path}")
+        safe_print(f"Processing all eligible files in {directory_path}")

    # Process each file
    for file_path in files_to_process:
        # Skip if not a file or doesn't exist
        if not file_path.is_file():
-            print(f"Warning: {file_path} does not exist or is not a file. Skipping.")
+            safe_print(f"Warning: {file_path} does not exist or is not a file. Skipping.")
            continue

        filename = file_path.name

        # Skip files with 'debug' in the filename
        if 'debug' in filename:
-            print(f"Skipping debug file: {filename}")
+            safe_print(f"Skipping debug file: {filename}")
            continue

        # Process file with 'mirrored' in the filename
@@ -270,13 +279,13 @@ def process_videos(

            if frame_count > 0:
                processed_count += 1
-                print(f"Successfully processed: {filename}\n")
+                safe_print(f"Successfully processed: {filename}\n")
            else:
-                print(f"Failed to process: {filename}\n")
+                safe_print(f"Failed to process: {filename}\n")

        except Exception as e:
-            print(f"Error processing {filename}: {e}\n")
+            safe_print(f"Error processing {filename}: {e}\n")

-    print(f"\nProcessing complete. {processed_count} files were processed.")
+    safe_print(f"\nProcessing complete. {processed_count} files were processed.")

    return processed_count
{pyfaceau-1.3.5 → pyfaceau-1.3.7}/setup.py

@@ -11,7 +11,7 @@ long_description = (this_directory / "README.md").read_text()

 setup(
     name="pyfaceau",
-    version="1.3.
+    version="1.3.6",
     author="John Wilson",
     author_email="",  # Add email if desired
     description="Pure Python OpenFace 2.2 AU extraction with PyMTCNN face detection and CLNF refinement",