pyfaceau 1.0.9__tar.gz → 1.3.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (70) hide show
  1. pyfaceau-1.3.0/LICENSE +47 -0
  2. {pyfaceau-1.0.9/pyfaceau.egg-info → pyfaceau-1.3.0}/PKG-INFO +2 -2
  3. {pyfaceau-1.0.9 → pyfaceau-1.3.0}/pyfaceau/alignment/face_aligner.py +57 -27
  4. pyfaceau-1.3.0/pyfaceau/alignment/paw.py +285 -0
  5. pyfaceau-1.3.0/pyfaceau/data/__init__.py +19 -0
  6. pyfaceau-1.3.0/pyfaceau/data/hdf5_dataset.py +508 -0
  7. pyfaceau-1.3.0/pyfaceau/data/quality_filter.py +277 -0
  8. pyfaceau-1.3.0/pyfaceau/data/training_data_generator.py +548 -0
  9. {pyfaceau-1.0.9 → pyfaceau-1.3.0}/pyfaceau/detectors/__init__.py +8 -4
  10. pyfaceau-1.3.0/pyfaceau/detectors/retinaface.py +352 -0
  11. {pyfaceau-1.0.9 → pyfaceau-1.3.0}/pyfaceau/download_weights.py +3 -3
  12. {pyfaceau-1.0.9 → pyfaceau-1.3.0}/pyfaceau/features/histogram_median_tracker.py +14 -26
  13. {pyfaceau-1.0.9 → pyfaceau-1.3.0}/pyfaceau/features/pdm.py +55 -9
  14. pyfaceau-1.3.0/pyfaceau/nn/__init__.py +88 -0
  15. pyfaceau-1.3.0/pyfaceau/nn/au_prediction_inference.py +447 -0
  16. pyfaceau-1.3.0/pyfaceau/nn/au_prediction_net.py +501 -0
  17. pyfaceau-1.3.0/pyfaceau/nn/landmark_pose_inference.py +536 -0
  18. pyfaceau-1.3.0/pyfaceau/nn/landmark_pose_net.py +497 -0
  19. pyfaceau-1.3.0/pyfaceau/nn/train_au_prediction.py +521 -0
  20. pyfaceau-1.3.0/pyfaceau/nn/train_landmark_pose.py +508 -0
  21. {pyfaceau-1.0.9 → pyfaceau-1.3.0}/pyfaceau/pipeline.py +220 -65
  22. {pyfaceau-1.0.9 → pyfaceau-1.3.0}/pyfaceau/processor.py +3 -5
  23. {pyfaceau-1.0.9 → pyfaceau-1.3.0/pyfaceau.egg-info}/PKG-INFO +2 -2
  24. {pyfaceau-1.0.9 → pyfaceau-1.3.0}/pyfaceau.egg-info/SOURCES.txt +13 -8
  25. {pyfaceau-1.0.9 → pyfaceau-1.3.0}/pyfaceau.egg-info/requires.txt +1 -1
  26. {pyfaceau-1.0.9 → pyfaceau-1.3.0}/pyproject.toml +3 -3
  27. {pyfaceau-1.0.9 → pyfaceau-1.3.0}/setup.py +3 -40
  28. pyfaceau-1.0.9/COMMERCIAL-LICENSE.md +0 -109
  29. pyfaceau-1.0.9/LICENSE +0 -40
  30. pyfaceau-1.0.9/pyfaceau/clnf/__init__.py +0 -20
  31. pyfaceau-1.0.9/pyfaceau/clnf/cen_patch_experts.py +0 -439
  32. pyfaceau-1.0.9/pyfaceau/clnf/clnf_detector.py +0 -134
  33. pyfaceau-1.0.9/pyfaceau/clnf/nu_rlms.py +0 -248
  34. pyfaceau-1.0.9/pyfaceau/clnf/pdm.py +0 -206
  35. pyfaceau-1.0.9/pyfaceau/utils/cython_extensions/cython_histogram_median.c +0 -34924
  36. pyfaceau-1.0.9/pyfaceau/utils/cython_extensions/cython_rotation_update.c +0 -32038
  37. {pyfaceau-1.0.9 → pyfaceau-1.3.0}/MANIFEST.in +0 -0
  38. {pyfaceau-1.0.9 → pyfaceau-1.3.0}/README.md +0 -0
  39. {pyfaceau-1.0.9 → pyfaceau-1.3.0}/pyfaceau/__init__.py +0 -0
  40. {pyfaceau-1.0.9 → pyfaceau-1.3.0}/pyfaceau/alignment/__init__.py +0 -0
  41. {pyfaceau-1.0.9 → pyfaceau-1.3.0}/pyfaceau/alignment/calc_params.py +0 -0
  42. {pyfaceau-1.0.9 → pyfaceau-1.3.0}/pyfaceau/alignment/numba_calcparams_accelerator.py +0 -0
  43. {pyfaceau-1.0.9 → pyfaceau-1.3.0}/pyfaceau/detectors/extract_mtcnn_weights.py +0 -0
  44. {pyfaceau-1.0.9 → pyfaceau-1.3.0}/pyfaceau/detectors/openface_mtcnn.py +0 -0
  45. {pyfaceau-1.0.9 → pyfaceau-1.3.0}/pyfaceau/detectors/pfld.py +0 -0
  46. {pyfaceau-1.0.9 → pyfaceau-1.3.0}/pyfaceau/detectors/pymtcnn_detector.py +0 -0
  47. {pyfaceau-1.0.9 → pyfaceau-1.3.0}/pyfaceau/features/__init__.py +0 -0
  48. {pyfaceau-1.0.9 → pyfaceau-1.3.0}/pyfaceau/features/triangulation.py +0 -0
  49. {pyfaceau-1.0.9 → pyfaceau-1.3.0}/pyfaceau/parallel_pipeline.py +0 -0
  50. {pyfaceau-1.0.9 → pyfaceau-1.3.0}/pyfaceau/prediction/__init__.py +0 -0
  51. {pyfaceau-1.0.9 → pyfaceau-1.3.0}/pyfaceau/prediction/au_predictor.py +0 -0
  52. {pyfaceau-1.0.9 → pyfaceau-1.3.0}/pyfaceau/prediction/batched_au_predictor.py +0 -0
  53. {pyfaceau-1.0.9 → pyfaceau-1.3.0}/pyfaceau/prediction/model_parser.py +0 -0
  54. {pyfaceau-1.0.9 → pyfaceau-1.3.0}/pyfaceau/prediction/running_median.py +0 -0
  55. {pyfaceau-1.0.9 → pyfaceau-1.3.0}/pyfaceau/prediction/running_median_fallback.py +0 -0
  56. {pyfaceau-1.0.9 → pyfaceau-1.3.0}/pyfaceau/refinement/__init__.py +0 -0
  57. {pyfaceau-1.0.9 → pyfaceau-1.3.0}/pyfaceau/refinement/pdm.py +0 -0
  58. {pyfaceau-1.0.9 → pyfaceau-1.3.0}/pyfaceau/refinement/svr_patch_expert.py +0 -0
  59. {pyfaceau-1.0.9 → pyfaceau-1.3.0}/pyfaceau/refinement/targeted_refiner.py +0 -0
  60. {pyfaceau-1.0.9 → pyfaceau-1.3.0}/pyfaceau/utils/__init__.py +0 -0
  61. {pyfaceau-1.0.9 → pyfaceau-1.3.0}/pyfaceau/utils/cython_extensions/cython_histogram_median.pyx +0 -0
  62. {pyfaceau-1.0.9 → pyfaceau-1.3.0}/pyfaceau/utils/cython_extensions/cython_rotation_update.pyx +0 -0
  63. {pyfaceau-1.0.9 → pyfaceau-1.3.0}/pyfaceau/utils/cython_extensions/setup.py +0 -0
  64. {pyfaceau-1.0.9 → pyfaceau-1.3.0}/pyfaceau.egg-info/dependency_links.txt +0 -0
  65. {pyfaceau-1.0.9 → pyfaceau-1.3.0}/pyfaceau.egg-info/entry_points.txt +0 -0
  66. {pyfaceau-1.0.9 → pyfaceau-1.3.0}/pyfaceau.egg-info/not-zip-safe +0 -0
  67. {pyfaceau-1.0.9 → pyfaceau-1.3.0}/pyfaceau.egg-info/top_level.txt +0 -0
  68. {pyfaceau-1.0.9 → pyfaceau-1.3.0}/pyfaceau_gui.py +0 -0
  69. {pyfaceau-1.0.9 → pyfaceau-1.3.0}/requirements.txt +0 -0
  70. {pyfaceau-1.0.9 → pyfaceau-1.3.0}/setup.cfg +0 -0
pyfaceau-1.3.0/LICENSE ADDED
@@ -0,0 +1,47 @@
1
+ Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0)
2
+
3
+ Copyright (c) 2025 John Wilson IV, MD
4
+
5
+ This work is licensed under the Creative Commons Attribution-NonCommercial 4.0
6
+ International License. To view a copy of this license, visit
7
+ http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
8
+ Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
9
+
10
+ ================================================================================
11
+
12
+ You are free to:
13
+
14
+ * Share — copy and redistribute the material in any medium or format
15
+ * Adapt — remix, transform, and build upon the material
16
+
17
+ The licensor cannot revoke these freedoms as long as you follow the license terms.
18
+
19
+ ================================================================================
20
+
21
+ Under the following terms:
22
+
23
+ * Attribution — You must give appropriate credit, provide a link to the
24
+ license, and indicate if changes were made. You may do so in any reasonable
25
+ manner, but not in any way that suggests the licensor endorses you or your use.
26
+
27
+ * NonCommercial — You may not use the material for commercial purposes.
28
+
29
+ * No additional restrictions — You may not apply legal terms or technological
30
+ measures that legally restrict others from doing anything the license permits.
31
+
32
+ ================================================================================
33
+
34
+ Notices:
35
+
36
+ You do not have to comply with the license for elements of the material in the
37
+ public domain or where your use is permitted by an applicable exception or
38
+ limitation.
39
+
40
+ No warranties are given. The license may not give you all of the permissions
41
+ necessary for your intended use. For example, other rights such as publicity,
42
+ privacy, or moral rights may limit how you use the material.
43
+
44
+ ================================================================================
45
+
46
+ Full legal code available at:
47
+ https://creativecommons.org/licenses/by-nc/4.0/legalcode
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: pyfaceau
3
- Version: 1.0.9
3
+ Version: 1.3.0
4
4
  Summary: Pure Python OpenFace 2.2 AU extraction with CLNF landmark refinement
5
5
  Home-page: https://github.com/johnwilsoniv/face-analysis
6
6
  Author: John Wilson
@@ -32,7 +32,7 @@ Requires-Dist: scipy>=1.7.0
32
32
  Requires-Dist: scikit-learn>=1.0.0
33
33
  Requires-Dist: tqdm>=4.62.0
34
34
  Requires-Dist: pyfhog>=0.1.0
35
- Requires-Dist: Cython>=0.29.0
35
+ Requires-Dist: pyclnf>=0.2.0
36
36
  Provides-Extra: dev
37
37
  Requires-Dist: pytest>=7.0.0; extra == "dev"
38
38
  Requires-Dist: black>=22.0.0; extra == "dev"
@@ -100,12 +100,12 @@ class OpenFace22FaceAligner:
100
100
  source_rigid = self._extract_rigid_points(landmarks_68)
101
101
  dest_rigid = self._extract_rigid_points(self.reference_shape)
102
102
 
103
- # Compute scale (no rotation from Kabsch)
104
- scale_identity = self._align_shapes_with_scale(source_rigid, dest_rigid)
105
- scale = scale_identity[0, 0] # Extract scale from identity matrix
103
+ # Compute scale (no rotation from Kabsch) - matching working commit approach
104
+ scale_identity = self._compute_scale_only(source_rigid, dest_rigid)
105
+ scale = scale_identity
106
106
 
107
- # Apply INVERSE of CSV p_rz rotation
108
- # CSV p_rz describes rotation FROM canonical TO tilted
107
+ # Apply INVERSE of p_rz rotation
108
+ # p_rz describes rotation FROM canonical TO tilted
109
109
  # We need rotation FROM tilted TO canonical, which is -p_rz
110
110
  angle = -p_rz
111
111
  cos_a = np.cos(angle)
@@ -117,7 +117,7 @@ class OpenFace22FaceAligner:
117
117
  # Combine scale and rotation
118
118
  scale_rot_matrix = scale * R
119
119
 
120
- # Build 2×3 affine warp matrix
120
+ # Build 2×3 affine warp matrix using pose translation
121
121
  warp_matrix = self._build_warp_matrix(scale_rot_matrix, pose_tx, pose_ty)
122
122
 
123
123
  # Apply affine transformation
@@ -239,6 +239,37 @@ class OpenFace22FaceAligner:
239
239
 
240
240
  return warp_matrix
241
241
 
242
+ def _build_warp_matrix_centroid(self, scale_rot: np.ndarray, src_centroid: np.ndarray, dst_centroid: np.ndarray) -> np.ndarray:
243
+ """
244
+ Build 2×3 affine warp matrix using source and destination centroids
245
+
246
+ This is the corrected version that uses rigid point centroids instead of
247
+ pose translation parameters, which gives better alignment with C++ OpenFace.
248
+
249
+ Args:
250
+ scale_rot: (2, 2) similarity transform matrix (scale × rotation)
251
+ src_centroid: (2,) centroid of source rigid points
252
+ dst_centroid: (2,) centroid of destination rigid points
253
+
254
+ Returns:
255
+ (2, 3) affine warp matrix for cv2.warpAffine
256
+ """
257
+ # Initialize 2×3 warp matrix
258
+ warp_matrix = np.zeros((2, 3), dtype=np.float32)
259
+
260
+ # Copy scale-rotation to first 2×2 block
261
+ warp_matrix[:2, :2] = scale_rot
262
+
263
+ # Transform source centroid through scale-rotation
264
+ T_src = scale_rot @ src_centroid
265
+
266
+ # Translation: map src_centroid to dst_centroid, then center in output
267
+ # dst_centroid is in PDM space (centered around 0), so add output_center
268
+ warp_matrix[0, 2] = dst_centroid[0] - T_src[0] + self.output_width / 2
269
+ warp_matrix[1, 2] = dst_centroid[1] - T_src[1] + self.output_height / 2
270
+
271
+ return warp_matrix
272
+
242
273
  def _extract_rigid_points(self, landmarks: np.ndarray) -> np.ndarray:
243
274
  """
244
275
  Extract 24 rigid points from 68 landmarks
@@ -286,37 +317,34 @@ class OpenFace22FaceAligner:
286
317
 
287
318
  # Rotation matrix: R = V^T × corr × U^T
288
319
  # OpenFace C++ uses: R = svd.vt.t() * corr * svd.u.t()
320
+ # But we need to transpose to match C++ behavior
321
+ # Testing showed R.T gives correct rotation direction (+18° vs -18°)
289
322
  R = Vt.T @ corr @ U.T
290
323
 
291
- return R
324
+ return R.T # Transpose to match C++ rotation direction
292
325
 
293
- def _align_shapes_with_scale(self, src: np.ndarray, dst: np.ndarray) -> np.ndarray:
326
+ def _align_shapes_with_scale_and_rotation(self, src: np.ndarray, dst: np.ndarray) -> np.ndarray:
294
327
  """
295
- Compute similarity transform (scale only, NO rotation) between two point sets
328
+ Compute similarity transform (scale + rotation via Kabsch) between two point sets
296
329
 
297
- CRITICAL FIX: Since CSV landmarks are PDM-reconstructed (via CalcShape2D),
298
- they are already in canonical orientation. We only need scale + translation,
299
- NOT rotation via Kabsch.
330
+ This matches C++ AlignShapesWithScale in RotationHelpers.h lines 195-241.
300
331
 
301
- Background: FaceAnalyser.cpp calls CalcParams TWICE:
302
- 1. On raw landmarks → params_global₁ → CalcShape2D → reconstructed landmarks (CSV output)
303
- 2. On reconstructed landmarks → params_global₂ → AlignFace
332
+ CRITICAL: p_rz is NOT used for alignment! C++ computes rotation from landmarks
333
+ using Kabsch algorithm.
304
334
 
305
- The second CalcParams produces near-zero rotation because reconstructed landmarks
306
- are already canonical. Our Python uses CSV landmarks (already canonical), so we
307
- skip rotation computation entirely.
308
-
309
- Algorithm:
335
+ Algorithm (matching C++):
310
336
  1. Mean-normalize both src and dst
311
337
  2. Compute RMS scale for each
312
- 3. Return: (s_dst / s_src) × Identity (scale only, no rotation)
338
+ 3. Normalize by scale
339
+ 4. Compute rotation via Kabsch2D
340
+ 5. Return: (s_dst / s_src) × R_kabsch
313
341
 
314
342
  Args:
315
343
  src: (N, 2) source points (detected landmarks)
316
344
  dst: (N, 2) destination points (reference shape)
317
345
 
318
346
  Returns:
319
- (2, 2) similarity transform matrix (scale × identity)
347
+ (2, 2) similarity transform matrix (scale × rotation)
320
348
  """
321
349
  n = src.shape[0]
322
350
 
@@ -335,18 +363,20 @@ class OpenFace22FaceAligner:
335
363
  dst_mean_normed[:, 1] -= mean_dst_y
336
364
 
337
365
  # 2. Compute RMS scale for each point set
338
- # OpenFace C++ uses: sqrt(sum(points^2) / n)
366
+ # C++ RotationHelpers.h line 221-222
339
367
  src_sq = src_mean_normed ** 2
340
368
  dst_sq = dst_mean_normed ** 2
341
369
 
342
370
  s_src = np.sqrt(np.sum(src_sq) / n)
343
371
  s_dst = np.sqrt(np.sum(dst_sq) / n)
344
372
 
345
- # 3. Normalize by scale
373
+ # 3. Normalize by scale (C++ line 224-225)
346
374
  src_norm = src_mean_normed / s_src
347
375
  dst_norm = dst_mean_normed / s_dst
348
376
 
349
- # 3. Return scale only (no rotation computed via Kabsch)
350
- # Rotation will be provided externally from CSV p_rz
377
+ # 4. Get rotation via Kabsch2D (C++ line 230)
378
+ R = self._align_shapes_kabsch_2d(src_norm, dst_norm)
379
+
380
+ # 5. Return scale * rotation (C++ line 233)
351
381
  scale = s_dst / s_src
352
- return scale * np.eye(2, dtype=np.float32)
382
+ return scale * R
@@ -0,0 +1,285 @@
1
+ """
2
+ Piecewise Affine Warp (PAW) for face alignment.
3
+
4
+ Based on OpenFace implementation:
5
+ - lib/local/LandmarkDetector/src/PAW.cpp
6
+ - Active Appearance Models Revisited (Matthews & Baker, IJCV 2004)
7
+
8
+ This implementation matches the C++ PAW algorithm for pixel-perfect alignment.
9
+ """
10
+
11
+ import numpy as np
12
+ import cv2
13
+ from typing import Tuple, Optional
14
+
15
+
16
class PAW:
    """
    Piecewise Affine Warp (PAW) using triangulation.

    Warps faces by applying an independent affine transform to each mesh
    triangle, allowing complex non-affine deformations.  Mirrors the C++
    implementation in OpenFace (lib/local/LandmarkDetector/src/PAW.cpp) and
    "Active Appearance Models Revisited" (Matthews & Baker, IJCV 2004).

    Landmark arrays are flat (2*N,) vectors in [x0..xN-1, y0..yN-1] order.
    """

    def __init__(self, destination_landmarks: np.ndarray, triangulation: np.ndarray,
                 min_x: Optional[float] = None, min_y: Optional[float] = None,
                 max_x: Optional[float] = None, max_y: Optional[float] = None):
        """
        Initialize PAW with destination shape and triangulation.

        Args:
            destination_landmarks: (2*N,) array with [x0...xN, y0...yN] format
            triangulation: (M, 3) array of triangle vertex indices
            min_x, min_y, max_x, max_y: Optional bounds for the output image;
                when omitted they are taken from the destination landmarks.
        """
        self.destination_landmarks = destination_landmarks.copy()
        self.triangulation = triangulation.copy()

        num_points = len(destination_landmarks) // 2
        num_tris = len(triangulation)

        # Split the flat landmark vector into x and y coordinate arrays.
        xs = destination_landmarks[:num_points]
        ys = destination_landmarks[num_points:]

        # Pre-computed barycentric (alpha/beta) coefficients per triangle
        # (PAW.cpp lines 83-96).  Rows for degenerate triangles stay zero.
        self.alpha = np.zeros((num_tris, 3), dtype=np.float32)
        self.beta = np.zeros((num_tris, 3), dtype=np.float32)

        # Per-triangle vertices + bounding box used by point-in-triangle
        # lookups.  BUGFIX: this list must stay index-aligned with
        # self.alpha / self.beta / self.triangulation.  The previous code
        # skipped the append for degenerate triangles, shifting every later
        # triangle's index by one, so _find_triangle / triangle_id pointed
        # at the wrong coefficients.  Degenerate triangles now get a None
        # placeholder and are excluded from lookups.
        self.triangle_bounds = []

        for tri_idx in range(num_tris):
            j, k, l = triangulation[tri_idx]

            # Coefficients from PAW.cpp lines 83-96.
            c1 = ys[l] - ys[j]
            c2 = xs[l] - xs[j]
            c4 = ys[k] - ys[j]
            c3 = xs[k] - xs[j]
            c5 = c3 * c1 - c2 * c4  # twice the signed triangle area

            if abs(c5) < 1e-10:
                # Degenerate (zero-area) triangle: keep index alignment but
                # never let it claim pixels (collinear triangles would pass
                # the >= 0 same-side tests for points off the line).
                self.triangle_bounds.append(None)
                continue

            self.alpha[tri_idx, 0] = (ys[j] * c2 - xs[j] * c1) / c5
            self.alpha[tri_idx, 1] = c1 / c5
            self.alpha[tri_idx, 2] = -c2 / c5

            self.beta[tri_idx, 0] = (xs[j] * c4 - ys[j] * c3) / c5
            self.beta[tri_idx, 1] = -c4 / c5
            self.beta[tri_idx, 2] = c3 / c5

            # Vertices and bounding box for fast point-in-triangle tests.
            tri_xs = [xs[j], xs[k], xs[l]]
            tri_ys = [ys[j], ys[k], ys[l]]
            self.triangle_bounds.append({
                'vertices': [(tri_xs[i], tri_ys[i]) for i in range(3)],
                'min_x': min(tri_xs),
                'max_x': max(tri_xs),
                'min_y': min(tri_ys),
                'max_y': max(tri_ys)
            })

        # Output image bounds default to the destination shape's extent.
        if min_x is None:
            min_x = float(np.min(xs))
            min_y = float(np.min(ys))
            max_x = float(np.max(xs))
            max_y = float(np.max(ys))

        self.min_x = min_x
        self.min_y = min_y

        width = int(max_x - min_x + 1.5)
        height = int(max_y - min_y + 1.5)

        # pixel_mask marks destination pixels inside the mesh; triangle_id
        # records which triangle owns each masked pixel (-1 outside).
        self.pixel_mask = np.zeros((height, width), dtype=np.uint8)
        self.triangle_id = np.full((height, width), -1, dtype=np.int32)

        # Assign each destination pixel to a triangle; passing the previous
        # hit as a guess exploits spatial coherence between neighbours.
        curr_tri = -1
        for y in range(height):
            for x in range(width):
                curr_tri = self._find_triangle(x + min_x, y + min_y, curr_tri)
                if curr_tri != -1:
                    self.triangle_id[y, x] = curr_tri
                    self.pixel_mask[y, x] = 1

        # Pre-allocated per-warp scratch buffers.
        self.coefficients = np.zeros((num_tris, 6), dtype=np.float32)
        self.map_x = np.zeros((height, width), dtype=np.float32)
        self.map_y = np.zeros((height, width), dtype=np.float32)

    def warp(self, image: np.ndarray, source_landmarks: np.ndarray) -> np.ndarray:
        """
        Warp image from source landmarks onto the destination shape.

        Args:
            image: Source image to warp
            source_landmarks: (2*N,) array with [x0...xN, y0...yN] format

        Returns:
            Warped image matching the destination shape
        """
        # Per-triangle affine coefficients for this source shape.
        self._calc_coeff(source_landmarks)

        # Per-pixel sampling coordinates into the source image.
        self._warp_region()

        # Bilinear resampling; out-of-mesh pixels map to (-1, -1).
        return cv2.remap(image, self.map_x, self.map_y, cv2.INTER_LINEAR)

    def _calc_coeff(self, source_landmarks: np.ndarray):
        """
        Calculate per-triangle warping coefficients from source landmarks.
        Matches PAW::CalcCoeff() in PAW.cpp lines 338-370.

        Args:
            source_landmarks: (2*N,) array with [x0...xN, y0...yN] format
        """
        num_points = len(source_landmarks) // 2

        for tri_idx in range(len(self.triangulation)):
            i, j, k = self.triangulation[tri_idx]

            # Source coordinates of the triangle vertices, relative to
            # vertex i (x components c1..c3, y components c4..c6).
            c1 = source_landmarks[i]
            c2 = source_landmarks[j] - c1
            c3 = source_landmarks[k] - c1
            c4 = source_landmarks[i + num_points]
            c5 = source_landmarks[j + num_points] - c4
            c6 = source_landmarks[k + num_points] - c4

            alpha = self.alpha[tri_idx]
            beta = self.beta[tri_idx]

            # Six coefficients of the affine map dest -> source.
            self.coefficients[tri_idx, 0] = c1 + c2 * alpha[0] + c3 * beta[0]
            self.coefficients[tri_idx, 1] = c2 * alpha[1] + c3 * beta[1]
            self.coefficients[tri_idx, 2] = c2 * alpha[2] + c3 * beta[2]
            self.coefficients[tri_idx, 3] = c4 + c5 * alpha[0] + c6 * beta[0]
            self.coefficients[tri_idx, 4] = c5 * alpha[1] + c6 * beta[1]
            self.coefficients[tri_idx, 5] = c5 * alpha[2] + c6 * beta[2]

    def _warp_region(self):
        """
        Compute source-pixel sampling coordinates for every destination pixel.

        Vectorized equivalent of PAW::WarpRegion() (PAW.cpp lines 374-436):
        each masked pixel applies its triangle's affine transform
        (x_src = c0 + c1*xi + c2*yi, y_src = c3 + c4*xi + c5*yi); pixels
        outside the mesh map to (-1, -1).
        """
        height, width = self.pixel_mask.shape
        inside = self.pixel_mask != 0

        # Empty mesh (or no pixels inside): everything is out of bounds.
        if self.coefficients.size == 0 or not inside.any():
            self.map_x[:] = -1
            self.map_y[:] = -1
            return

        grid_x, grid_y = np.meshgrid(
            np.arange(width, dtype=np.float32) + self.min_x,
            np.arange(height, dtype=np.float32) + self.min_y)

        # Clamp -1 ids to 0 so fancy indexing is safe; those pixels are
        # overwritten with -1 below.
        safe_id = np.where(inside, self.triangle_id, 0)
        c = self.coefficients[safe_id]  # (H, W, 6)

        self.map_x[:] = c[..., 0] + c[..., 1] * grid_x + c[..., 2] * grid_y
        self.map_y[:] = c[..., 3] + c[..., 4] * grid_x + c[..., 5] * grid_y
        self.map_x[~inside] = -1
        self.map_y[~inside] = -1

    @staticmethod
    def _same_side(x0: float, y0: float, x1: float, y1: float,
                   x2: float, y2: float, x3: float, y3: float) -> bool:
        """
        Check if point (x0,y0) is on same side of line (x2,y2)-(x3,y3) as point (x1,y1).
        Matches PAW::sameSide() in PAW.cpp lines 443-451.
        """
        x = (x3 - x2) * (y0 - y2) - (x0 - x2) * (y3 - y2)
        y = (x3 - x2) * (y1 - y2) - (x1 - x2) * (y3 - y2)
        return x * y >= 0

    @staticmethod
    def _point_in_triangle(x0: float, y0: float, x1: float, y1: float,
                           x2: float, y2: float, x3: float, y3: float) -> bool:
        """
        Check if point (x0,y0) is inside triangle (x1,y1)-(x2,y2)-(x3,y3).
        Matches PAW::pointInTriangle() in PAW.cpp lines 454-461.
        """
        return (PAW._same_side(x0, y0, x1, y1, x2, y2, x3, y3)
                and PAW._same_side(x0, y0, x2, y2, x1, y1, x3, y3)
                and PAW._same_side(x0, y0, x3, y3, x1, y1, x2, y2))

    def _find_triangle(self, x: float, y: float, guess: int = -1) -> int:
        """
        Find which (non-degenerate) triangle contains point (x, y).
        Matches PAW::findTriangle() in PAW.cpp lines 465-515.

        Args:
            x, y: Point coordinates
            guess: Previous triangle index for optimization

        Returns:
            Triangle index or -1 if point is outside all triangles
        """
        # Try guess first for speed (degenerate entries are None).
        if guess != -1:
            bounds = self.triangle_bounds[guess]
            if bounds is not None:
                v = bounds['vertices']
                if self._point_in_triangle(x, y, v[0][0], v[0][1],
                                           v[1][0], v[1][1],
                                           v[2][0], v[2][1]):
                    return guess

        # Search all triangles.
        for tri_idx, bounds in enumerate(self.triangle_bounds):
            if bounds is None:
                # Degenerate triangle — never owns a pixel.
                continue

            # Quick bounding-box rejection before the exact test.
            if (x < bounds['min_x'] or x > bounds['max_x'] or
                    y < bounds['min_y'] or y > bounds['max_y']):
                continue

            v = bounds['vertices']
            if self._point_in_triangle(x, y, v[0][0], v[0][1],
                                       v[1][0], v[1][1],
                                       v[2][0], v[2][1]):
                return tri_idx

        return -1
+
256
+
257
def load_triangulation(filepath: str) -> np.ndarray:
    """
    Load a triangulation file in OpenFace format.

    Format:
        Line 1: Number of triangles
        Line 2: Number of columns (always 3)
        Lines 3+: Triangle vertex indices (3 per line)

    Args:
        filepath: Path to triangulation file (e.g., tris_68_full.txt)

    Returns:
        (M, 3) array of triangle vertex indices

    Raises:
        ValueError: If the column count is not 3 or the file is truncated.
            (Was an `assert`, which is silently stripped under `python -O`.)
    """
    with open(filepath, 'r') as f:
        lines = f.readlines()

    num_tris = int(lines[0].strip())
    num_cols = int(lines[1].strip())

    if num_cols != 3:
        raise ValueError(f"Expected 3 columns, got {num_cols}")
    if len(lines) < num_tris + 2:
        raise ValueError(
            f"Truncated triangulation file: expected {num_tris} rows, "
            f"found {len(lines) - 2}")

    triangulation = np.zeros((num_tris, 3), dtype=np.int32)
    for i in range(num_tris):
        parts = lines[i + 2].split()
        if len(parts) < 3:
            raise ValueError(f"Malformed triangle row {i}: {lines[i + 2]!r}")
        triangulation[i] = [int(parts[j]) for j in range(3)]

    return triangulation
@@ -0,0 +1,19 @@
1
+ """
2
+ pyfaceau.data - Training data generation and storage for neural network training.
3
+
4
+ This package provides:
5
+ - HDF5 dataset storage for efficient training data access
6
+ - Training data generator to extract features from videos
7
+ - Quality filtering for training data
8
+ """
9
+
10
+ from .hdf5_dataset import TrainingDataset, TrainingDataWriter
11
+ from .training_data_generator import TrainingDataGenerator
12
+ from .quality_filter import QualityFilter
13
+
14
+ __all__ = [
15
+ 'TrainingDataset',
16
+ 'TrainingDataWriter',
17
+ 'TrainingDataGenerator',
18
+ 'QualityFilter',
19
+ ]