alignfaces 1.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- alignfaces/__init__.py +15 -0
- alignfaces/aperture_tools.py +213 -0
- alignfaces/contrast_tools.py +106 -0
- alignfaces/contrast_tools_.py +106 -0
- alignfaces/data/shape_predictor_68_face_landmarks.dat +0 -0
- alignfaces/face_landmarks.py +233 -0
- alignfaces/make_aligned_faces.py +1217 -0
- alignfaces/make_aligned_faces_.py +1209 -0
- alignfaces/make_files.py +42 -0
- alignfaces/make_files_.py +42 -0
- alignfaces/make_files_OLD.py +86 -0
- alignfaces/phase_cong_3.py +524 -0
- alignfaces/plot_tools.py +170 -0
- alignfaces/procrustes_tools.py +217 -0
- alignfaces/tests/R/align_reference.csv +1 -0
- alignfaces/tests/R/align_shapes.csv +40 -0
- alignfaces/tests/R/input_shapes.csv +40 -0
- alignfaces/tests/__init__.py +0 -0
- alignfaces/tests/_test_pawarp.py +267 -0
- alignfaces/tests/test_procrustes_tools.py +569 -0
- alignfaces/tests/test_warp_tools.py +316 -0
- alignfaces/warp_tools.py +279 -0
- alignfaces-1.0.1.dist-info/METADATA +135 -0
- alignfaces-1.0.1.dist-info/RECORD +27 -0
- alignfaces-1.0.1.dist-info/WHEEL +5 -0
- alignfaces-1.0.1.dist-info/licenses/LICENSE.txt +13 -0
- alignfaces-1.0.1.dist-info/top_level.txt +1 -0
alignfaces/tests/_test_pawarp.py
@@ -0,0 +1,267 @@
from alignfaces.warp_tools import pawarp
from numpy.fft import fft2, ifft2
import numpy as np

import sys  # for exception handling
# import matplotlib.pyplot as plt


# TO DO
# Loop through many and record.
# Set criteria appropriately.


# -----------------------------------------------------------------------------
# Helper functions

def random_locations(im_size, num_loc=4, min_dist=5):
    """Create an array of num_loc points in the padded interior of an
    im_size x im_size image, each pair separated by more than min_dist pixels."""
    PAD = int(im_size * .15)
    locations = (np.random.rand(1, 2) * (im_size-1-PAD*2)+PAD).astype(int)
    total = 1
    while total < num_loc:
        # Sample new point.
        loc = (np.random.rand(1, 2) * (im_size-1-PAD*2)+PAD).astype(int)

        SAMPLE_NEW = False
        for SETLOC in locations:
            if np.sqrt(((SETLOC-loc)**2).sum()) <= min_dist:
                # Bad sample. Skip to beginning of loop for new sample.
                SAMPLE_NEW = True
        if SAMPLE_NEW:
            continue

        # Good sample. Append to results.
        locations = np.r_[locations, loc]
        total += 1
    IND = locations_to_indices(im_size, locations)
    locations = indices_to_locations(im_size, IND)
    return locations, IND


def perturb_locations(locations, max_value, change_range):
    """Randomly shift each coordinate by up to +/- change_range/2 pixels
    (integer-truncated), clipping the result to [0, max_value]."""
    num_targets = locations.shape[0]
    noise = np.random.rand(num_targets, 2)
    other_locations = ((noise-.5) * change_range + locations).astype(int)
    other_locations[other_locations < 0] = 0
    other_locations[other_locations > max_value] = max_value
    return other_locations
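
# Illustrative arithmetic (hypothetical numbers, not part of the test): with
# change_range = 8, a noise value of 0.75 shifts a coordinate by
# (0.75 - 0.5) * 8 = +2 pixels, while a noise value of 0.0 shifts it by -4;
# after truncation the result is clipped into [0, max_value], e.g.
#
#     perturb_locations(np.array([[0, 120]]), max_value=127, change_range=8)
#     # -> each coordinate moved by at most 4 pixels and kept inside the image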


def image_with_donut_targets(im_size=64, locations=((8, 8), (32, 32))):
    """Make a zero image with a target centered on each location; also return
    the target kernel."""
    # Target: center-surround ("donut") kernel, center 9 * 8 = 72 ringed by -9.
    B = np.full((3, 3), -9.0)
    B[1, 1] = 9 * 8

    # Image with a copy of the target centered on each location.
    A = np.zeros((im_size, im_size))
    for tr, tc in locations:
        A[tr - 1:tr + 2, tc - 1:tc + 2] = B

    img, target = A, B
    return img, target
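
# Note (illustrative, not part of the original test): the kernel sums to zero,
# so convolving it with any constant region of the image gives 0, while the
# response at a target center is sum(B * B) = 72**2 + 8 * (-9)**2 = 5832,
# which is the value the peak-picking helpers below key on.
#
#     print((image_with_donut_targets()[1] ** 2).sum())  # -> 5832.0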


def fft_convolve2d(x, y):
    """2D convolution via FFT (circular): zero-pad the kernel y to the size of
    x, multiply the transforms, and roll the result so the response is centered."""
    pad = np.array(x.shape) - np.array(y.shape)
    if pad[0] % 2 == 0:
        rb, ra = int(pad[0]/2)+1, int(pad[0]/2)-1
    else:
        rb, ra = int(np.ceil(pad[0]/2)), int(np.floor(pad[0]/2))
    if pad[1] % 2 == 0:
        cb, ca = int(pad[1]/2)+1, int(pad[1]/2)-1
    else:
        cb, ca = int(np.ceil(pad[1]/2)), int(np.floor(pad[1]/2))
    pad_width = ((rb, ra), (cb, ca))
    py = np.pad(y, pad_width, mode="constant")

    fr = fft2(x)
    fr2 = fft2(np.flipud(np.fliplr(py)))
    m, n = fr.shape
    cc = np.real(ifft2(fr * fr2))
    cc = np.roll(cc, int(-m/2 + 1), axis=0)
    cc = np.roll(cc, int(-n/2 + 1), axis=1)
    return cc
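
# A minimal, commented-out sanity check (illustrative only; it relies solely on
# the helpers defined in this file): convolving a clean test image with its own
# target kernel should put the strongest response on a target center.
#
#     demo_img, demo_target = image_with_donut_targets(64, ((20, 20), (40, 45)))
#     demo_C = fft_convolve2d(demo_img, demo_target)
#     print(np.unravel_index(demo_C.argmax(), demo_C.shape))  # expect (20, 20) or (40, 45)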


def locations_to_indices(img_width, locations):
    """Convert (row, col) locations to sorted row-major flat indices."""
    num_targets = locations.shape[0]
    IND = np.zeros(num_targets)
    for i, rc in enumerate(locations):
        r, c = rc
        this_ind = r * img_width + c
        IND[i] = this_ind
    IND.sort()
    return IND.astype(int)


def indices_to_locations(img_width, IND):
    """Convert row-major flat indices back to (row, col) locations, sorted by index."""
    IND.sort()
    num_targets = IND.size
    SUB = np.zeros((num_targets, 2), dtype=int)
    for i, ind in enumerate(IND):
        r = int(np.floor(ind / img_width))
        c = int(ind % img_width)
        SUB[i, :] = [r, c]
    return SUB
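
# Worked example (hypothetical numbers): for img_width = 128, the location
# (r, c) = (3, 5) flattens to 3 * 128 + 5 = 389, and 389 maps back to
# (389 // 128, 389 % 128) = (3, 5). Both helpers sort by flat index, so the
# ordering of points may change on a round trip.
#
#     print(locations_to_indices(128, np.array([[3, 5], [2, 7]])))  # -> [263 389]
#     print(indices_to_locations(128, np.array([263, 389])))        # -> [[2 7], [3 5]]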


def top_n_locations(C, num_targets):
    """Return the sorted flat indices of the num_targets largest values in C."""
    cv = np.copy(C).flatten()
    si = cv.argsort()
    IND = si[-num_targets:]
    IND.sort()
    return IND


def top_n_locations_robust(C, num_targets):
    """Return the num_targets strongest peaks in C as (locations, flat indices),
    suppressing a 5 x 5 neighborhood around each detected maximum so that a
    single blurred peak is not counted twice."""
    CC = np.copy(C)
    SUB = np.zeros((num_targets, 2), dtype=int)
    for i in range(num_targets):
        MC = np.where(CC == CC.max())
        r, c = MC[0][0], MC[1][0]
        SUB[i, :] = [r, c]
        # Set the 5 x 5 area centered on (r, c) to 0 (clipped at the array border).
        CC[max(r - 2, 0):r + 3, max(c - 2, 0):c + 3] = 0
    IND = locations_to_indices(CC.shape[1], SUB)
    SUB = indices_to_locations(CC.shape[1], IND)
    return SUB, IND
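
# Illustrative, commented-out contrast between the two peak pickers (values are
# made up): with a blurred peak, the plain top-n picker can return two pixels
# from the same target, while the robust picker suppresses the surrounding
# block and moves on to the next target.
#
#     demo_C = np.zeros((16, 16))
#     demo_C[5, 5], demo_C[5, 6] = 10.0, 9.0   # one blurred peak
#     demo_C[12, 3] = 8.0                      # a second, weaker peak
#     print(top_n_locations(demo_C, 2))        # -> [85 86]: same target twice
#     print(top_n_locations_robust(demo_C, 2)[0])  # -> [[5, 5], [12, 3]]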


def distances_of_best_matched_points(locations, top_locations):
    """Greedily pair true and estimated locations by smallest distance and
    return the matched distances."""
    # every location matched with every estimated location
    num_targets = locations.shape[0]
    O = locations
    N = top_locations
    DELTA = np.kron(O, np.ones((num_targets, 1))) - np.tile(N, (num_targets, 1))
    DIST = np.sqrt((DELTA**2).sum(axis=1))
    D = DIST.reshape((num_targets, num_targets))

    # distances between points and estimated points giving best match
    MIN_D = np.zeros((num_targets,))
    for di in range(num_targets):
        MIN_D[di] = D.min()
        locs = np.where(D == D.min())
        R, C = locs[0][0], locs[1][0]
        D[R, :] = np.inf
        D[:, C] = np.inf
    return MIN_D
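
# Worked example (hypothetical points): with true locations O = [[0, 0], [3, 4]]
# and estimates N = [[0, 1], [3, 4]], the kron/tile construction yields the full
# distance matrix D = [[1.0, 5.0], [sqrt(18), 0.0]]; greedy matching first takes
# the 0.0 pair, strikes out its row and column, then takes the 1.0 pair.
#
#     print(distances_of_best_matched_points(np.array([[0, 0], [3, 4]]),
#                                            np.array([[0, 1], [3, 4]])))  # -> [0. 1.]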


# -----------------------------------------------------------------------------
# Main

# Parameters - input image with targets centered on landmarks (locations)
im_size = 128         # length of square image, pixels
num_targets = 3       # number of targets
min_target_dist = 32  # minimum distance between landmarks, pixels

# Parameters - warped version of input image
change_range = 8         # range of random shift in landmark position, pixels
max_value = im_size - 1  # maximum x and y position. minimum always 0.

# Parameters - test
iterations = 10000
ALL_D = np.zeros((iterations, num_targets))

# Perform multiple tests.
for this_it in range(iterations):
    # Create image with center-surround ("donut") targets
    locations, target_indices = random_locations(im_size, num_targets, min_target_dist)
    img, target = image_with_donut_targets(im_size, locations)

    # Ensure that all helper functions are working properly.
    # Should be able to find exact target positions using convolution.
    C = fft_convolve2d(img, target)
    top_indices = top_n_locations(C, num_targets)
    working_functions = (top_indices == target_indices).all()
    assert working_functions, "Helper functions do not work ❌: Cannot interpret test."

    # Warp centers of targets to slightly different locations
    new_locations = perturb_locations(locations, max_value, change_range)
    wimg, tri, inpix, fwdwarpix = pawarp(img, base=new_locations, target=locations, interp='bilin')

    # Recover the original image by warping back
    wwimg, tri, inpix, fwdwarpix = pawarp(wimg, base=locations, target=new_locations, interp='bilin')

    # Look for the targets in the recovered image
    C = fft_convolve2d(wwimg, target)
    estimated_locations, top_indices = top_n_locations_robust(C, num_targets)

    # plt_1 = plt.figure(figsize=(10, 10))
    # plt.imshow(wwimg, cmap="gray")
    # plt.plot(locations[:, 1], locations[:, 0], 'g.', markersize=24, alpha=0.5)
    # plt.plot(estimated_locations[:, 1], estimated_locations[:, 0], 'r+', markersize=24, alpha=0.5)
    # plt.show()

    best_distances = distances_of_best_matched_points(locations, estimated_locations)
    # print("Distances of best-matched points: ", best_distances)
    print(this_it)

    ALL_D[this_it, :] = best_distances

# Evaluate
ALL_D = ALL_D.flatten()
Q95_less_than_1_pixel = np.quantile(ALL_D, 0.95) < 1
max_offset_less_than_perturbation = ALL_D.max() < change_range
PASS = Q95_less_than_1_pixel and max_offset_less_than_perturbation
assert PASS, "Fail❌"

print("Passed 🚀")

# Passed with iterations == 10000 😊

# END
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------