pyfaceau-1.0.6-cp312-cp312-macosx_11_0_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pyfaceau/__init__.py +19 -0
- pyfaceau/alignment/__init__.py +0 -0
- pyfaceau/alignment/calc_params.py +671 -0
- pyfaceau/alignment/face_aligner.py +352 -0
- pyfaceau/alignment/numba_calcparams_accelerator.py +244 -0
- pyfaceau/cython_histogram_median.cpython-312-darwin.so +0 -0
- pyfaceau/cython_rotation_update.cpython-312-darwin.so +0 -0
- pyfaceau/detectors/__init__.py +0 -0
- pyfaceau/detectors/pfld.py +128 -0
- pyfaceau/detectors/retinaface.py +352 -0
- pyfaceau/download_weights.py +134 -0
- pyfaceau/features/__init__.py +0 -0
- pyfaceau/features/histogram_median_tracker.py +335 -0
- pyfaceau/features/pdm.py +269 -0
- pyfaceau/features/triangulation.py +64 -0
- pyfaceau/parallel_pipeline.py +462 -0
- pyfaceau/pipeline.py +1083 -0
- pyfaceau/prediction/__init__.py +0 -0
- pyfaceau/prediction/au_predictor.py +434 -0
- pyfaceau/prediction/batched_au_predictor.py +269 -0
- pyfaceau/prediction/model_parser.py +337 -0
- pyfaceau/prediction/running_median.py +318 -0
- pyfaceau/prediction/running_median_fallback.py +200 -0
- pyfaceau/processor.py +270 -0
- pyfaceau/refinement/__init__.py +12 -0
- pyfaceau/refinement/svr_patch_expert.py +361 -0
- pyfaceau/refinement/targeted_refiner.py +362 -0
- pyfaceau/utils/__init__.py +0 -0
- pyfaceau/utils/cython_extensions/cython_histogram_median.c +35391 -0
- pyfaceau/utils/cython_extensions/cython_histogram_median.pyx +316 -0
- pyfaceau/utils/cython_extensions/cython_rotation_update.c +32262 -0
- pyfaceau/utils/cython_extensions/cython_rotation_update.pyx +211 -0
- pyfaceau/utils/cython_extensions/setup.py +47 -0
- pyfaceau-1.0.6.data/scripts/pyfaceau_gui.py +302 -0
- pyfaceau-1.0.6.dist-info/METADATA +466 -0
- pyfaceau-1.0.6.dist-info/RECORD +40 -0
- pyfaceau-1.0.6.dist-info/WHEEL +5 -0
- pyfaceau-1.0.6.dist-info/entry_points.txt +3 -0
- pyfaceau-1.0.6.dist-info/licenses/LICENSE +40 -0
- pyfaceau-1.0.6.dist-info/top_level.txt +1 -0
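Based on the module layout above, the features subpackage would plausibly be imported as sketched below; the actual public exports live in pyfaceau/__init__.py (+19 lines), whose contents are not shown in this diff:

    # Hypothetical import surface inferred from the file list above; not
    # confirmed by the (unshown) pyfaceau/__init__.py exports.
    from pyfaceau.features.histogram_median_tracker import (
        HistogramBasedMedianTracker,
        DualHistogramMedianTracker,
    )
    from pyfaceau.features.pdm import PDMParser
    from pyfaceau.features.triangulation import TriangulationParser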
pyfaceau/features/histogram_median_tracker.py
ADDED
@@ -0,0 +1,335 @@
#!/usr/bin/env python3
"""
Histogram-Based Running Median Tracker

Implements OpenFace 2.2's histogram-based running median algorithm for
person-specific normalization. Matches the C++ implementation in
FaceAnalyser::UpdateRunningMedian().

Reference: OpenFace/lib/local/FaceAnalyser/src/FaceAnalyser.cpp:764-821
"""

import numpy as np


class HistogramBasedMedianTracker:
    """
    Histogram-based running median tracker matching OpenFace 2.2's implementation

    Uses binned histograms to efficiently compute running median without storing
    all historical values. More memory-efficient than rolling window approach.
    """

    def __init__(self, feature_dim: int, num_bins: int = 200,
                 min_val: float = -3.0, max_val: float = 5.0):
        """
        Initialize histogram-based median tracker

        Args:
            feature_dim: Dimensionality of feature vectors
            num_bins: Number of histogram bins (OF2.2 uses 200)
            min_val: Minimum value for histogram range
            max_val: Maximum value for histogram range
        """
        self.feature_dim = feature_dim
        self.num_bins = num_bins
        self.min_val = min_val
        self.max_val = max_val

        # Histogram: (feature_dim, num_bins)
        # Each row tracks the distribution of one feature dimension
        self.histogram = np.zeros((feature_dim, num_bins), dtype=np.int32)

        # Current median estimate
        self.current_median = np.zeros(feature_dim, dtype=np.float64)

        # Total count of updates
        self.hist_count = 0

        # Precompute bin width
        self.length = max_val - min_val
        self.bin_width = self.length / num_bins

    def update(self, features: np.ndarray, update_histogram: bool = True) -> None:
        """
        Update tracker with new feature vector

        Matches C++ UpdateRunningMedian() logic:
        1. Bin each feature value into histogram
        2. Update histogram counts
        3. Recompute median from cumulative distribution

        Args:
            features: Feature vector (1D array)
            update_histogram: Whether to update histogram (OF2.2 does this every 2nd frame)
        """
        # Ensure 1D
        if features.ndim == 2:
            features = features.flatten()

        assert features.shape[0] == self.feature_dim, \
            f"Expected {self.feature_dim} features, got {features.shape[0]}"

        if update_histogram:
            # Convert feature values to bin indices
            # Formula from C++: (descriptor - min_val) * num_bins / (max_val - min_val)
            converted = (features - self.min_val) * self.num_bins / self.length

            # Cap values to [0, num_bins-1] BEFORE casting to int (matches C++)
            # C++ does: setTo(num_bins-1, converted > num_bins-1) then setTo(0, converted < 0)
            converted = np.clip(converted, 0.0, float(self.num_bins - 1))

            # Cast to int (truncation, matches C++ (int) cast)
            converted = converted.astype(np.int32)

            # Update histogram counts
            for i in range(self.feature_dim):
                bin_idx = converted[i]
                self.histogram[i, bin_idx] += 1

            self.hist_count += 1

        # Compute median (matches C++: always sets median, even on frame 0)
        # On frame 0 (hist_count==0), C++ sets median=descriptor.clone()
        # On frame 1+ with hist_count==1, C++ also uses descriptor directly
        # Otherwise, compute from histogram
        if self.hist_count == 0:
            # Frame 0: histogram not updated yet, use descriptor directly
            self.current_median = features.copy()
        elif self.hist_count == 1:
            # Frame 1: histogram updated once, still use descriptor directly (matches C++)
            self.current_median = features.copy()
        else:
            # Frame 2+: compute from histogram
            self._compute_median()

    def _compute_median(self, first_descriptor: np.ndarray = None) -> None:
        """
        Compute median from histogram using cumulative sum

        Matches C++ logic:
        - If hist_count == 1: median = descriptor (special case)
        - Otherwise: Find bin where cumulative sum reaches (hist_count+1)/2
        - Convert bin index back to feature value

        Args:
            first_descriptor: If provided and hist_count==1, use directly as median
        """
        # Special case: First frame (matches C++ if(hist_count == 1) { median = descriptor.clone(); })
        if self.hist_count == 1 and first_descriptor is not None:
            self.current_median = first_descriptor.copy()
            return

        cutoff_point = (self.hist_count + 1) // 2

        for i in range(self.feature_dim):
            cumulative_sum = 0

            for j in range(self.num_bins):
                cumulative_sum += self.histogram[i, j]

                if cumulative_sum >= cutoff_point:
                    # Convert bin index back to value
                    # Formula from C++: min_val + bin_idx * bin_width + 0.5 * bin_width
                    self.current_median[i] = (
                        self.min_val +
                        j * self.bin_width +
                        0.5 * self.bin_width
                    )
                    break

    def get_median(self) -> np.ndarray:
        """
        Get current running median

        Returns:
            Median feature vector (1D array)
        """
        return self.current_median.copy()

    def reset(self) -> None:
        """Reset tracker (e.g., for new video)"""
        self.histogram.fill(0)
        self.current_median.fill(0.0)
        self.hist_count = 0


class DualHistogramMedianTracker:
    """
    Manages separate histogram-based running medians for HOG and geometric features

    OF2.2 tracks HOG and geometric features separately with different histogram
    parameters, then concatenates them when computing dynamic model predictions.
    """

    def __init__(self,
                 hog_dim: int = 4464,
                 geom_dim: int = 238,
                 hog_bins: int = 200,
                 hog_min: float = -3.0,
                 hog_max: float = 5.0,
                 geom_bins: int = 200,
                 geom_min: float = -3.0,
                 geom_max: float = 5.0):
        """
        Initialize dual histogram median tracker

        Args:
            hog_dim: HOG feature dimensionality
            geom_dim: Geometric feature dimensionality
            hog_bins: Number of bins for HOG histogram
            hog_min: Minimum value for HOG histogram
            hog_max: Maximum value for HOG histogram
            geom_bins: Number of bins for geometric histogram
            geom_min: Minimum value for geometric histogram
            geom_max: Maximum value for geometric histogram
        """
        self.hog_tracker = HistogramBasedMedianTracker(
            hog_dim, hog_bins, hog_min, hog_max
        )
        self.geom_tracker = HistogramBasedMedianTracker(
            geom_dim, geom_bins, geom_min, geom_max
        )

        # Track frame number to match C++ behavior (geometric updates every other frame)
        self.frames_tracking = 0

    def update(self, hog_features: np.ndarray, geom_features: np.ndarray,
               update_histogram: bool = True) -> None:
        """
        Update both trackers

        CRITICAL: Matches C++ OpenFace 2.2 behavior (FaceAnalyser.cpp:400-428)
        - BOTH HOG and geometric medians: updated every OTHER frame (when frames_tracking % 2 == 1)
        - This is done as "a small speedup" optimization in C++

        Args:
            hog_features: HOG feature vector
            geom_features: Geometric feature vector
            update_histogram: Whether to update histograms
        """
        # CRITICAL: C++ updates BOTH medians only on odd frames (line 400-428 in FaceAnalyser.cpp)
        # if(frames_tracking % 2 == 1)
        update_on_this_frame = update_histogram and (self.frames_tracking % 2 == 1)

        # Update HOG tracker (only on odd frames)
        self.hog_tracker.update(hog_features, update_on_this_frame)

        # CRITICAL: OpenFace clamps HOG median to >= 0 after update (line 405 in FaceAnalyser.cpp)
        # this->hog_desc_median.setTo(0, this->hog_desc_median < 0);
        if update_on_this_frame:
            self.hog_tracker.current_median[self.hog_tracker.current_median < 0] = 0.0

        # Update geometric tracker (only on odd frames)
        self.geom_tracker.update(geom_features, update_on_this_frame)

        # Increment frame counter
        self.frames_tracking += 1

    def get_combined_median(self) -> np.ndarray:
        """
        Get concatenated [HOG_median, geom_median]

        Returns:
            Combined median vector (4702 dims)
        """
        hog_median = self.hog_tracker.get_median()
        geom_median = self.geom_tracker.get_median()
        return np.concatenate([hog_median, geom_median])

    def get_hog_median(self) -> np.ndarray:
        """Get HOG median only"""
        return self.hog_tracker.get_median()

    def get_geom_median(self) -> np.ndarray:
        """Get geometric median only"""
        return self.geom_tracker.get_median()

    def reset(self) -> None:
        """Reset both trackers"""
        self.hog_tracker.reset()
        self.geom_tracker.reset()
        self.frames_tracking = 0


def test_histogram_tracker():
    """Test histogram-based median tracker"""
    print("="*80)
    print("Histogram-Based Median Tracker - Test")
    print("="*80)

    # Create tracker
    tracker = HistogramBasedMedianTracker(
        feature_dim=10,
        num_bins=200,
        min_val=-3.0,
        max_val=5.0
    )

    # Generate synthetic data (random walk around mean)
    np.random.seed(42)
    mean_features = np.array([0.0, 0.5, 1.0, -0.5, -1.0, 2.0, -2.0, 1.5, -1.5, 0.8])

    print(f"\nSimulating 500 frames...")
    print(f"True mean: {mean_features}")

    for i in range(500):
        # Add noise to mean
        noise = np.random.randn(10) * 0.3
        features = mean_features + noise

        # Ensure values in histogram range
        features = np.clip(features, -3.0, 5.0)

        # Update tracker (update histogram every frame for testing)
        tracker.update(features, update_histogram=True)

        if i in [49, 99, 199, 499]:
            median = tracker.get_median()
            error = np.abs(median - mean_features).mean()
            print(f"\nFrame {i+1}:")
            print(f" True mean: {mean_features[:3]}...")
            print(f" Running median: {median[:3]}...")
            print(f" MAE: {error:.6f}")

    print("\nTracker converges to true mean!")

    # Test dual tracker
    print("\n" + "="*80)
    print("Dual Histogram Median Tracker - Test")
    print("="*80)

    dual_tracker = DualHistogramMedianTracker(
        hog_dim=4464,
        geom_dim=238,
        hog_bins=200,
        hog_min=-3.0,
        hog_max=5.0,
        geom_bins=200,
        geom_min=-3.0,
        geom_max=5.0
    )

    print("\nUpdating with synthetic HOG and geometric features...")
    for i in range(100):
        hog_feat = np.random.randn(4464) * 0.5
        geom_feat = np.random.randn(238) * 0.5

        # Clip to histogram range
        hog_feat = np.clip(hog_feat, -3.0, 5.0)
        geom_feat = np.clip(geom_feat, -3.0, 5.0)

        dual_tracker.update(hog_feat, geom_feat, update_histogram=(i % 2 == 0))

    combined = dual_tracker.get_combined_median()
    print(f"Combined median shape: {combined.shape}")
    print(f"Expected: (4702,)")
    assert combined.shape == (4702,), f"Expected (4702,), got {combined.shape}"

    print("\n" + "="*80)
    print("All tests passed!")
    print("="*80)


if __name__ == "__main__":
    test_histogram_tracker()
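To make the binning arithmetic above concrete: with the defaults (200 bins over [-3.0, 5.0]), bin_width = 8.0 / 200 = 0.04, so a value of 0.2 maps to bin int((0.2 + 3.0) * 200 / 8.0) = 80, whose center is -3.0 + 80 * 0.04 + 0.5 * 0.04 = 0.22. A minimal sketch (not part of the wheel) exercising HistogramBasedMedianTracker:

    import numpy as np
    from pyfaceau.features.histogram_median_tracker import HistogramBasedMedianTracker

    # Minimal sketch (not part of the wheel): three scalar updates, then read the
    # running median back as the center of the bin that reaches the cutoff count.
    tracker = HistogramBasedMedianTracker(feature_dim=1)  # defaults: 200 bins over [-3, 5]
    for v in (0.1, 0.2, 0.3):
        tracker.update(np.array([v]))  # lands in bins 77, 80, 82
    print(tracker.get_median())        # [0.22] -- the center of bin 80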
pyfaceau/features/pdm.py
ADDED
@@ -0,0 +1,269 @@
#!/usr/bin/env python3
"""
OpenFace 2.2 PDM (Point Distribution Model) Parser

Parses text-based PDM files containing mean landmark positions and
principal components for facial shape reconstruction.

Format:
1. Mean values section: (204,) - average 3D landmark positions
2. Principal components section: (204, 34) - eigenvectors for reconstruction

Usage:
    pdm = PDMParser("In-the-wild_aligned_PDM_68.txt")
    reconstructed_landmarks = pdm.reconstruct_from_params(pdm_params)
"""

import numpy as np
from pathlib import Path
from typing import Tuple


class PDMParser:
    """Parser for OpenFace 2.2 PDM files"""

    def __init__(self, pdm_file_path: str):
        """
        Initialize parser and load PDM

        Args:
            pdm_file_path: Path to PDM .txt file
        """
        self.pdm_file_path = Path(pdm_file_path)
        if not self.pdm_file_path.exists():
            raise FileNotFoundError(f"PDM file not found: {pdm_file_path}")

        # Load PDM components
        self.mean_shape, self.princ_comp, self.eigen_values = self._parse_pdm()

        print(f"Loaded PDM from {self.pdm_file_path.name}")
        print(f" Mean shape: {self.mean_shape.shape}")
        print(f" Principal components: {self.princ_comp.shape}")
        print(f" Eigenvalues: {self.eigen_values.shape}")

    def _parse_pdm(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
        """
        Parse PDM file

        Returns:
            Tuple of (mean_shape, principal_components, eigen_values)
            - mean_shape: (204, 1) array of mean landmark positions
            - principal_components: (204, 34) matrix of eigenvectors
            - eigen_values: (34,) array of eigenvalues (variances)
        """
        with open(self.pdm_file_path, 'r') as f:
            lines = f.readlines()

        # Parse mean values section
        mean_shape = self._parse_matrix_section(lines, 0)

        # Find principal components section
        # Look for the comment line that starts the PC section
        pc_start = None
        for i, line in enumerate(lines):
            if 'principal components' in line.lower() and 'eigenvectors' in line.lower():
                pc_start = i
                break

        if pc_start is None:
            raise ValueError("Principal components section not found in PDM file")

        # Parse principal components section
        princ_comp = self._parse_matrix_section(lines, pc_start)

        # Find eigenvalues section
        # Look for the comment line that starts the eigenvalues section
        eigen_start = None
        for i, line in enumerate(lines):
            if 'eigenvalues' in line.lower() and 'variances' in line.lower():
                eigen_start = i
                break

        if eigen_start is None:
            raise ValueError("Eigenvalues section not found in PDM file")

        # Parse eigenvalues section
        eigen_values = self._parse_matrix_section(lines, eigen_start)
        eigen_values = eigen_values.flatten()  # Convert to 1D array

        return mean_shape, princ_comp, eigen_values

    def _parse_matrix_section(self, lines, start_idx):
        """
        Parse a matrix section starting at given line index

        Args:
            lines: All lines from file
            start_idx: Index of comment line before matrix

        Returns:
            numpy array with the matrix data
        """
        # Skip comment line
        idx = start_idx + 1

        # Read dimensions
        rows = int(lines[idx].strip())
        idx += 1

        cols = int(lines[idx].strip())
        idx += 1

        # Skip dtype line
        idx += 1

        # Read matrix data
        data = []
        values_read = 0
        total_values = rows * cols

        while values_read < total_values and idx < len(lines):
            line = lines[idx].strip()
            if line and not line.startswith('#'):
                # Split on whitespace and convert to float
                values = [float(x) for x in line.split()]
                data.extend(values)
                values_read += len(values)
            idx += 1

        if len(data) != total_values:
            raise ValueError(
                f"Expected {total_values} values, got {len(data)}"
            )

        # Reshape to matrix
        matrix = np.array(data, dtype=np.float64).reshape(rows, cols)

        return matrix

    def reconstruct_from_params(self, pdm_params: np.ndarray) -> np.ndarray:
        """
        Reconstruct 3D landmarks from PDM parameters

        This matches OpenFace 2.2's reconstruction:
        reconstructed_landmarks = princ_comp × pdm_params

        Args:
            pdm_params: (34,) array of PDM parameters from CSV (p_0...p_33)

        Returns:
            (204,) array of reconstructed 3D landmarks
            Format: [X_0...X_67, Y_0...Y_67, Z_0...Z_67]
        """
        if pdm_params.ndim == 2:
            pdm_params = pdm_params.flatten()

        if pdm_params.shape[0] != self.princ_comp.shape[1]:
            raise ValueError(
                f"Expected {self.princ_comp.shape[1]} PDM params, "
                f"got {pdm_params.shape[0]}"
            )

        # Reconstruct: princ_comp (204, 34) × params (34,) = (204,)
        reconstructed = np.dot(self.princ_comp, pdm_params)

        return reconstructed

    def extract_geometric_features(self, pdm_params: np.ndarray) -> np.ndarray:
        """
        Extract geometric features for AU prediction

        Matches OpenFace 2.2's geom_descriptor_frame construction:
        1. Reconstruct landmarks from PDM
        2. Concatenate: [reconstructed_landmarks, pdm_params]

        Args:
            pdm_params: (34,) array of PDM parameters

        Returns:
            Geometric feature vector matching OF2.2 format
        """
        # Reconstruct landmarks
        reconstructed_landmarks = self.reconstruct_from_params(pdm_params)

        # Concatenate: [landmarks, params]
        geom_features = np.concatenate([reconstructed_landmarks, pdm_params])

        return geom_features


def test_pdm_parser():
    """Test PDM parser"""
    import pandas as pd

    print("="*80)
    print("PDM Parser - Test")
    print("="*80)

    # Path to PDM file
    pdm_file = "/Users/johnwilsoniv/repo/fea_tool/external_libs/openFace/OpenFace/lib/local/FaceAnalyser/AU_predictors/In-the-wild_aligned_PDM_68.txt"

    # Load PDM
    print("\nLoading PDM...")
    pdm = PDMParser(pdm_file)

    # Load CSV to get PDM params
    csv_file = "/Users/johnwilsoniv/Documents/SplitFace Open3/S1 Face Mirror/of22_validation/IMG_0942_left_mirrored.csv"
    print(f"\nLoading CSV: {csv_file}")
    df = pd.read_csv(csv_file)

    # Extract PDM params from first frame
    pdm_cols = [f'p_{i}' for i in range(34)]
    pdm_params = df.iloc[0][pdm_cols].values

    print(f"\nPDM params from frame 0: {pdm_params.shape}")
    print(f" First 5 values: {pdm_params[:5]}")

    # Reconstruct landmarks
    print("\nReconstructing landmarks...")
    reconstructed = pdm.reconstruct_from_params(pdm_params)
    print(f"Reconstructed landmarks: {reconstructed.shape}")
    print(f" First 5 values: {reconstructed[:5]}")

    # Check range
    print(f"\nReconstructed landmark statistics:")
    print(f" Min: {reconstructed.min():.2f}")
    print(f" Max: {reconstructed.max():.2f}")
    print(f" Mean: {reconstructed.mean():.2f}")
    print(f" Std: {reconstructed.std():.2f}")

    # Compare to raw landmarks
    X_cols = [f'X_{i}' for i in range(68)]
    Y_cols = [f'Y_{i}' for i in range(68)]
    Z_cols = [f'Z_{i}' for i in range(68)]

    raw_landmarks = np.concatenate([
        df.iloc[0][X_cols].values,
        df.iloc[0][Y_cols].values,
        df.iloc[0][Z_cols].values
    ])

    print(f"\nRaw landmark statistics (from CSV):")
    print(f" Min: {raw_landmarks.min():.2f}")
    print(f" Max: {raw_landmarks.max():.2f}")
    print(f" Mean: {raw_landmarks.mean():.2f}")
    print(f" Std: {raw_landmarks.std():.2f}")

    # Extract geometric features
    print("\nExtracting geometric features...")
    geom_features = pdm.extract_geometric_features(pdm_params)
    print(f"Geometric features: {geom_features.shape}")
    print(f" Expected: (238,) = 204 (landmarks) + 34 (params)")

    # Check if reconstructed landmarks fit in histogram range [-60, 60]
    in_range = np.sum((reconstructed >= -60) & (reconstructed <= 60))
    total = reconstructed.size
    pct = 100.0 * in_range / total

    print(f"\nHistogram range check [-60, 60]:")
    print(f" Values in range: {in_range}/{total} ({pct:.2f}%)")
    print(f" Values below -60: {np.sum(reconstructed < -60)}")
    print(f" Values above 60: {np.sum(reconstructed > 60)}")

    print("\n" + "="*80)
    print("Test complete!")
    print("="*80)


if __name__ == "__main__":
    test_pdm_parser()
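Worth noting when using pdm.py: reconstruct_from_params() computes princ_comp @ pdm_params without adding mean_shape, matching the geom_descriptor_frame construction it cites, so all-zero parameters reconstruct to the zero vector rather than the mean face. A minimal sketch (not part of the wheel; the PDM path assumes a local copy of OpenFace's model file):

    import numpy as np
    from pyfaceau.features.pdm import PDMParser

    pdm = PDMParser("In-the-wild_aligned_PDM_68.txt")  # assumed local copy
    params = np.zeros(34)                 # p_0...p_33, all zero
    geom = pdm.extract_geometric_features(params)
    assert geom.shape == (238,)           # 204 reconstructed values + 34 params
    assert not geom.any()                 # zero params -> zero reconstruction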
pyfaceau/features/triangulation.py
ADDED
@@ -0,0 +1,64 @@
#!/usr/bin/env python3
"""
Parse OpenFace triangulation file for face masking
"""

import numpy as np

class TriangulationParser:
    """Parser for OpenFace tris_68.txt triangulation data"""

    def __init__(self, tris_file: str):
        """
        Load triangulation from OpenFace format file

        Args:
            tris_file: Path to tris_68.txt file
        """
        with open(tris_file, 'r') as f:
            lines = [line.strip() for line in f if line.strip() and not line.startswith('#')]

        # Parse file format:
        # Line 0: Total number of triangle lines (111)
        # Line 1: Number of triangulation sets (3)
        # Line 2: Dimension (4)
        # Line 3+: Triangle definitions (vertex indices)

        total_triangles = int(lines[0])

        # Read all triangles starting from line 3
        triangles = []
        for i in range(3, len(lines)):  # Start from line 3, skip header
            tri = list(map(int, lines[i].split()))
            if len(tri) == 3:  # Valid triangle
                triangles.append(tri)

        self.triangles = np.array(triangles, dtype=np.int32)

        print(f"Loaded {len(self.triangles)} triangles from {tris_file}")

    def create_face_mask(self, landmarks: np.ndarray, img_width: int, img_height: int) -> np.ndarray:
        """
        Create binary mask for face region using triangulation

        Args:
            landmarks: (68, 2) array of facial landmark coordinates
            img_width: Mask width in pixels
            img_height: Mask height in pixels

        Returns:
            (height, width) binary mask (0=background, 255=face)
        """
        import cv2

        mask = np.zeros((img_height, img_width), dtype=np.uint8)

        # Fill each triangle
        for tri in self.triangles:
            # Get the three vertices for this triangle
            pts = landmarks[tri].astype(np.int32)

            # Fill the triangle
            cv2.fillConvexPoly(mask, pts, 255)

        return mask
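And a minimal sketch (not part of the wheel) of driving TriangulationParser with synthetic landmarks; the tris_68.txt path assumes a local copy of OpenFace's model data:

    import numpy as np
    from pyfaceau.features.triangulation import TriangulationParser

    parser = TriangulationParser("tris_68.txt")  # assumed local copy
    rng = np.random.default_rng(0)
    landmarks = rng.uniform([0, 0], [640, 480], size=(68, 2))  # synthetic 68-point face
    mask = parser.create_face_mask(landmarks, img_width=640, img_height=480)
    assert mask.shape == (480, 640)
    assert set(np.unique(mask)) <= {0, 255}      # binary mask: 0 = background, 255 = face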