vision-camera-face-detection 2.2.0 → 2.2.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/VisionCameraFaceDetection.podspec +1 -1
- package/android/src/main/java/com/visioncamerafacedetection/VisionCameraFaceDetectionPlugin.kt +100 -83
- package/android/src/main/java/com/visioncamerafacedetection/VisionCameraFaceDetectorOrientation.kt +57 -0
- package/ios/FaceHelper.swift +103 -220
- package/ios/VisionCameraFaceDetectionPlugin.swift +326 -306
- package/ios/VisionCameraFaceDetectorOrientation.swift +81 -0
- package/lib/commonjs/Camera.js +58 -34
- package/lib/commonjs/Camera.js.map +1 -1
- package/lib/commonjs/FaceDetector.js.map +1 -1
- package/lib/module/Camera.js +60 -34
- package/lib/module/Camera.js.map +1 -1
- package/lib/module/FaceDetector.js.map +1 -1
- package/lib/typescript/commonjs/src/Camera.d.ts.map +1 -1
- package/lib/typescript/commonjs/src/FaceDetector.d.ts +24 -6
- package/lib/typescript/commonjs/src/FaceDetector.d.ts.map +1 -1
- package/lib/typescript/module/src/Camera.d.ts.map +1 -1
- package/lib/typescript/module/src/FaceDetector.d.ts +24 -6
- package/lib/typescript/module/src/FaceDetector.d.ts.map +1 -1
- package/package.json +4 -4
- package/src/Camera.tsx +65 -33
- package/src/FaceDetector.ts +31 -6
package/ios/VisionCameraFaceDetectionPlugin.swift

@@ -10,330 +10,350 @@ import TensorFlowLite

 @objc(VisionCameraFaceDetectionPlugin)
 public class VisionCameraFaceDetectionPlugin: FrameProcessorPlugin {
-  … (2 removed lines not captured in this view)
+  enum CameraFacing: String {
+    case front = "front"
+    case back = "back"
+  }
+
+  // detection props
+  private var autoMode = false
+  private var faceDetector: FaceDetector! = nil
+  private var runLandmarks = false
+  private var runClassifications = false
+  private var runContours = false
+  private var trackingEnabled = false
+  private var windowWidth = 1.0
+  private var windowHeight = 1.0
+  private var cameraFacing:AVCaptureDevice.Position = .front
+  private var orientationManager = VisionCameraFaceDetectorOrientation()
+  private var enableTensor = false
+
+  public override init(
+    proxy: VisionCameraProxyHolder,
+    options: [AnyHashable : Any]! = [:]
+  ) {
+    super.init(proxy: proxy, options: options)
+    let config = getConfig(withArguments: options)

-  … (4 removed lines not captured in this view)
-  private var runClassifications = false
-  private var runContours = false
-  private var trackingEnabled = false
-  private var enableTensor = false
+    let windowWidthParam = config?["windowWidth"] as? Double
+    if windowWidthParam != nil && windowWidthParam != windowWidth {
+      windowWidth = CGFloat(windowWidthParam!)
+    }

-  public override init(
-    proxy: VisionCameraProxyHolder,
-    options: [AnyHashable : Any]! = [:]
-  ) {
-    super.init(proxy: proxy, options: options)
-    let config = getConfig(withArguments: options)
-
-    // handle auto scaling
-    autoScale = config?["autoScale"] as? Bool == true
-
-    // handle enable/disable tensor
-    enableTensor = config?["enableTensor"] as? Bool == true
-
-    // initializes faceDetector on creation
-    let minFaceSize = 0.15
-    let optionsBuilder = FaceDetectorOptions()
-    optionsBuilder.performanceMode = .fast
-    optionsBuilder.landmarkMode = .none
-    optionsBuilder.contourMode = .none
-    optionsBuilder.classificationMode = .none
-    optionsBuilder.minFaceSize = minFaceSize
-    optionsBuilder.isTrackingEnabled = false
-
-    if config?["performanceMode"] as? String == "accurate" {
-      optionsBuilder.performanceMode = .accurate
-    }
-
-    if config?["landmarkMode"] as? String == "all" {
-      runLandmarks = true
-      optionsBuilder.landmarkMode = .all
-    }
-
-    if config?["classificationMode"] as? String == "all" {
-      runClassifications = true
-      optionsBuilder.classificationMode = .all
-    }
-
-    if config?["contourMode"] as? String == "all" {
-      runContours = true
-      optionsBuilder.contourMode = .all
-    }
-
-    let minFaceSizeParam = config?["minFaceSize"] as? Double
-    if minFaceSizeParam != nil && minFaceSizeParam != minFaceSize {
-      optionsBuilder.minFaceSize = CGFloat(minFaceSizeParam!)
-    }
-
-    if config?["trackingEnabled"] as? Bool == true {
-      trackingEnabled = true
-      optionsBuilder.isTrackingEnabled = true
-    }
-
-    faceDetector = FaceDetector.faceDetector(options: optionsBuilder)
+    let windowHeightParam = config?["windowHeight"] as? Double
+    if windowHeightParam != nil && windowHeightParam != windowHeight {
+      windowHeight = CGFloat(windowHeightParam!)
     }

-  func getConfig(
-    withArguments arguments: [AnyHashable: Any]!
-  ) -> [String:Any]! {
-    if arguments.count > 0 {
-      let config = arguments.map { dictionary in
-        Dictionary(uniqueKeysWithValues: dictionary.map { (key, value) in
-          (key as? String ?? "", value)
-        })
-      }
-
-      return config
-    }
-
-    return nil
+    if config?["cameraFacing"] as? String == "back" {
+      cameraFacing = .back
     }

-  … (16 removed lines not captured in this view)
-        "height": height,
-        "x": (-x + sourceWidth * scaleX) - width,
-        "y": y
-      ]
+    // handle auto scaling and rotation
+    autoMode = config?["autoMode"] as? Bool == true
+    enableTensor = config?["enableTensor"] as? Bool == true
+
+    // initializes faceDetector on creation
+    let minFaceSize = 0.15
+    let optionsBuilder = FaceDetectorOptions()
+    optionsBuilder.performanceMode = .fast
+    optionsBuilder.landmarkMode = .none
+    optionsBuilder.contourMode = .none
+    optionsBuilder.classificationMode = .none
+    optionsBuilder.minFaceSize = minFaceSize
+    optionsBuilder.isTrackingEnabled = false
+
+    if config?["performanceMode"] as? String == "accurate" {
+      optionsBuilder.performanceMode = .accurate
     }

-  … (39 removed lines not captured in this view)
+    if config?["landmarkMode"] as? String == "all" {
+      runLandmarks = true
+      optionsBuilder.landmarkMode = .all
+    }
+
+    if config?["classificationMode"] as? String == "all" {
+      runClassifications = true
+      optionsBuilder.classificationMode = .all
+    }
+
+    if config?["contourMode"] as? String == "all" {
+      runContours = true
+      optionsBuilder.contourMode = .all
+    }
+
+    let minFaceSizeParam = config?["minFaceSize"] as? Double
+    if minFaceSizeParam != nil && minFaceSizeParam != minFaceSize {
+      optionsBuilder.minFaceSize = CGFloat(minFaceSizeParam!)
+    }
+
+    if config?["trackingEnabled"] as? Bool == true {
+      trackingEnabled = true
+      optionsBuilder.isTrackingEnabled = true
+    }
+
+    faceDetector = FaceDetector.faceDetector(options: optionsBuilder)
+  }
+
+  func getConfig(
+    withArguments arguments: [AnyHashable: Any]!
+  ) -> [String:Any]! {
+    if arguments.count > 0 {
+      let config = arguments.map { dictionary in
+        Dictionary(uniqueKeysWithValues: dictionary.map { (key, value) in
+          (key as? String ?? "", value)
+        })
+      }
+
+      return config
+    }
+
+    return nil
+  }
+
+  func processBoundingBox(
+    from face: Face,
+    sourceWidth: CGFloat,
+    sourceHeight: CGFloat,
+    scaleX: CGFloat,
+    scaleY: CGFloat
+  ) -> [String:Any] {
+    let boundingBox = face.frame
+    let width = boundingBox.width * scaleX
+    let height = boundingBox.height * scaleY
+    // inverted because we also inverted sourceWidth/height
+    let x = boundingBox.origin.y * scaleX
+    let y = boundingBox.origin.x * scaleY
+
+    if(autoMode) {
+      return [
+        "width": width,
+        "height": height,
+        "x": (-x + sourceWidth * scaleX) - width,
+        "y": y
+      ]
+    }
+
+    return [
+      "width": width,
+      "height": height,
+      "x": y,
+      "y": x
+    ]
+  }
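The new `processBoundingBox` above swaps x and y (ML Kit sees the -90°-rotated buffer) and, when `autoMode` is on, mirrors x across the scaled frame width. A minimal sketch that re-derives that arithmetic in isolation; the helper name and sample numbers are hypothetical, not package code:

```swift
import CoreGraphics

// Hypothetical helper re-deriving the autoMode branch of processBoundingBox:
// the face box arrives in rotated buffer coordinates, so x/y come from the
// swapped origin, are scaled into window points, and x is mirrored.
func autoModeBounds(
  faceFrame: CGRect,       // face.frame in rotated buffer coordinates
  sourceWidth: CGFloat,    // CGFloat(frame.height), inverted as in the plugin
  sourceHeight: CGFloat,   // CGFloat(frame.width)
  windowWidth: CGFloat,
  windowHeight: CGFloat
) -> CGRect {
  let scaleX = windowWidth / sourceWidth
  let scaleY = windowHeight / sourceHeight
  let width = faceFrame.width * scaleX
  let height = faceFrame.height * scaleY
  let x = faceFrame.origin.y * scaleX  // swapped, as in the plugin
  let y = faceFrame.origin.x * scaleY
  // Same expression as the diff; sourceWidth * scaleX collapses to windowWidth.
  return CGRect(x: (-x + sourceWidth * scaleX) - width, y: y, width: width, height: height)
}

// A 1280×720 buffer is reported as 720(w)×1280(h); map onto a 390×844pt window:
let box = autoModeBounds(
  faceFrame: CGRect(x: 300, y: 200, width: 180, height: 180),
  sourceWidth: 720, sourceHeight: 1280,
  windowWidth: 390, windowHeight: 844
)
print(box)  // x ≈ 184.2, y ≈ 197.8, w = 97.5, h ≈ 118.7
```

Because `scaleX = windowWidth / sourceWidth`, the mirrored coordinate reduces to `windowWidth - x - width`. The diff continues with the landmark and contour helpers: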
+
+  func processLandmarks(
+    from face: Face,
+    scaleX: CGFloat,
+    scaleY: CGFloat
+  ) -> [String:[String: CGFloat?]] {
+    let faceLandmarkTypes = [
+      FaceLandmarkType.leftCheek,
+      FaceLandmarkType.leftEar,
+      FaceLandmarkType.leftEye,
+      FaceLandmarkType.mouthBottom,
+      FaceLandmarkType.mouthLeft,
+      FaceLandmarkType.mouthRight,
+      FaceLandmarkType.noseBase,
+      FaceLandmarkType.rightCheek,
+      FaceLandmarkType.rightEar,
+      FaceLandmarkType.rightEye
+    ]
+
+    let faceLandmarksTypesStrings = [
+      "LEFT_CHEEK",
+      "LEFT_EAR",
+      "LEFT_EYE",
+      "MOUTH_BOTTOM",
+      "MOUTH_LEFT",
+      "MOUTH_RIGHT",
+      "NOSE_BASE",
+      "RIGHT_CHEEK",
+      "RIGHT_EAR",
+      "RIGHT_EYE"
+    ];
+
+    var faceLandMarksTypesMap: [String: [String: CGFloat?]] = [:]
+    for i in 0..<faceLandmarkTypes.count {
+      let landmark = face.landmark(ofType: faceLandmarkTypes[i]);
+      let position = [
+        "x": landmark?.position.x ?? 0.0 * scaleX,
+        "y": landmark?.position.y ?? 0.0 * scaleY
+      ]
+      faceLandMarksTypesMap[faceLandmarksTypesStrings[i]] = position
+    }
+
+    return faceLandMarksTypesMap
+  }
+
+  func processFaceContours(
+    from face: Face,
+    scaleX: CGFloat,
+    scaleY: CGFloat
+  ) -> [String:[[String:CGFloat]]] {
+    let faceContoursTypes = [
+      FaceContourType.face,
+      FaceContourType.leftCheek,
+      FaceContourType.leftEye,
+      FaceContourType.leftEyebrowBottom,
+      FaceContourType.leftEyebrowTop,
+      FaceContourType.lowerLipBottom,
+      FaceContourType.lowerLipTop,
+      FaceContourType.noseBottom,
+      FaceContourType.noseBridge,
+      FaceContourType.rightCheek,
+      FaceContourType.rightEye,
+      FaceContourType.rightEyebrowBottom,
+      FaceContourType.rightEyebrowTop,
+      FaceContourType.upperLipBottom,
+      FaceContourType.upperLipTop
+    ]
+
+    let faceContoursTypesStrings = [
+      "FACE",
+      "LEFT_CHEEK",
+      "LEFT_EYE",
+      "LEFT_EYEBROW_BOTTOM",
+      "LEFT_EYEBROW_TOP",
+      "LOWER_LIP_BOTTOM",
+      "LOWER_LIP_TOP",
+      "NOSE_BOTTOM",
+      "NOSE_BRIDGE",
+      "RIGHT_CHEEK",
+      "RIGHT_EYE",
+      "RIGHT_EYEBROW_BOTTOM",
+      "RIGHT_EYEBROW_TOP",
+      "UPPER_LIP_BOTTOM",
+      "UPPER_LIP_TOP"
+    ];
+
+    var faceContoursTypesMap: [String:[[String:CGFloat]]] = [:]
+    for i in 0..<faceContoursTypes.count {
+      let contour = face.contour(ofType: faceContoursTypes[i]);
+      var pointsArray: [[String:CGFloat]] = []
+
+      if let points = contour?.points {
+        for point in points {
+          let currentPointsMap = [
+            "x": point.x * scaleX,
+            "y": point.y * scaleY,
+          ]
+
+          pointsArray.append(currentPointsMap)
         }

-  … (1 removed line not captured in this view)
+        faceContoursTypesMap[faceContoursTypesStrings[i]] = pointsArray
+      }
+    }
+
+    return faceContoursTypesMap
+  }
+
+  func getImageOrientation() -> UIImage.Orientation {
+    switch orientationManager.orientation {
+    case .portrait:
+      return cameraFacing == .front ? .leftMirrored : .right
+    case .landscapeLeft:
+      return cameraFacing == .front ? .upMirrored : .up
+    case .portraitUpsideDown:
+      return cameraFacing == .front ? .rightMirrored : .left
+    case .landscapeRight:
+      return cameraFacing == .front ? .downMirrored : .down
+    @unknown default:
+      return .up
     }
+  }
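`getImageOrientation` above replaces 2.2.0's `getOrientation(orientation:)`, which mapped `frame.orientation` without accounting for the camera position; the new table consults the orientation manager and mirrors for the front camera. A standalone restatement, assuming the manager reports a `UIDeviceOrientation`-style value (illustrative only, not package code):

```swift
import UIKit
import AVFoundation

// Illustrative restatement of the new orientation table: device orientation
// plus camera position → the UIImage.Orientation handed to ML Kit,
// mirrored variants for the front camera.
func mlkitImageOrientation(
  device: UIDeviceOrientation,
  cameraFacing: AVCaptureDevice.Position
) -> UIImage.Orientation {
  switch device {
  case .portrait:           return cameraFacing == .front ? .leftMirrored : .right
  case .landscapeLeft:      return cameraFacing == .front ? .upMirrored : .up
  case .portraitUpsideDown: return cameraFacing == .front ? .rightMirrored : .left
  case .landscapeRight:     return cameraFacing == .front ? .downMirrored : .down
  default:                  return .up  // faceUp / faceDown / unknown
  }
}

// Portrait selfie frames are rotated *and* mirrored before detection:
assert(mlkitImageOrientation(device: .portrait, cameraFacing: .front) == .leftMirrored)
```

The rest of the hunk rewrites the frame-processor callback: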
+
+  public override func callback(
+    _ frame: Frame,
+    withArguments arguments: [AnyHashable: Any]?
+  ) -> Any? {
+    var result: [Any] = []

-  … (22 removed lines not captured in this view)
+    do {
+      // we need to invert sizes as frame is always -90deg rotated
+      let width = CGFloat(frame.height)
+      let height = CGFloat(frame.width)
+      let image = VisionImage(buffer: frame.buffer)
+      image.orientation = getImageOrientation()
+
+      var scaleX:CGFloat
+      var scaleY:CGFloat
+      if (autoMode) {
+        scaleX = windowWidth / width
+        scaleY = windowHeight / height
+      } else {
+        scaleX = CGFloat(1)
+        scaleY = CGFloat(1)
+      }
+
+      let faces: [Face] = try faceDetector!.results(in: image)
+      for face in faces {
+        var map: [String: Any] = [:]
+        if enableTensor {
+          guard let imageCrop = FaceHelper.getImageFaceFromBuffer(from: frame.buffer, rectImage: face.frame, orientation: image.orientation) else {
+            return nil
+          }
+          guard let rgbData = FaceHelper.rgbDataFromBuffer(imageCrop) else {
+            return nil
+          }
+          try interpreter?.copy(rgbData, toInputAt: 0)
+          try interpreter?.invoke()
+          let outputTensor: Tensor? = try interpreter?.output(at: 0)
+
+          if ((outputTensor?.data) != nil) {
+            let result: [Float] = [Float32](unsafeData: outputTensor!.data) ?? []
+            map["data"] = result
+          } else {
+            map["data"] = []
+          }
+        } else {
+          map["data"] = []
+        }

-  … (7 removed lines not captured in this view)
-      "LOWER_LIP_TOP",
-      "NOSE_BOTTOM",
-      "NOSE_BRIDGE",
-      "RIGHT_CHEEK",
-      "RIGHT_EYE",
-      "RIGHT_EYEBROW_BOTTOM",
-      "RIGHT_EYEBROW_TOP",
-      "UPPER_LIP_BOTTOM",
-      "UPPER_LIP_TOP"
-    ];
+        if runLandmarks {
+          map["landmarks"] = processLandmarks(
+            from: face,
+            scaleX: scaleX,
+            scaleY: scaleY
+          )
+        }

-  … (5 removed lines not captured in this view)
-      if let points = contour?.points {
-        for point in points {
-          let currentPointsMap = [
-            "x": point.x * scaleX,
-            "y": point.y * scaleY,
-          ]
-
-          pointsArray.append(currentPointsMap)
-        }
-
-        faceContoursTypesMap[faceContoursTypesStrings[i]] = pointsArray
-      }
+        if runClassifications {
+          map["leftEyeOpenProbability"] = face.leftEyeOpenProbability
+          map["rightEyeOpenProbability"] = face.rightEyeOpenProbability
+          map["smilingProbability"] = face.smilingProbability
         }

-  … (6 removed lines not captured in this view)
-    switch orientation {
-    case .up:
-      // device is landscape left
-      return .up
-    case .left:
-      // device is portrait
-      return .right
-    case .down:
-      // device is landscape right
-      return .down
-    case .right:
-      // device is upside-down
-      return .left
-    default:
-      return .up
+        if runContours {
+          map["contours"] = processFaceContours(
+            from: face,
+            scaleX: scaleX,
+            scaleY: scaleY
+          )
         }
-  }
-
-  public override func callback(
-    _ frame: Frame,
-    withArguments arguments: [AnyHashable: Any]?
-  ) -> Any? {
-    var result: [Any] = []

-    … (2 removed lines not captured in this view)
-      let width = CGFloat(frame.height)
-      let height = CGFloat(frame.width)
-      let orientation = getOrientation(
-        orientation: frame.orientation
-      )
-      let image = VisionImage(buffer: frame.buffer)
-      image.orientation = orientation
-
-      var scaleX:CGFloat
-      var scaleY:CGFloat
-      if autoScale {
-        scaleX = screenBounds.size.width / width
-        scaleY = screenBounds.size.height / height
-      } else {
-        scaleX = CGFloat(1)
-        scaleY = CGFloat(1)
-      }
-
-      let faces: [Face] = try faceDetector!.results(in: image)
-      for face in faces {
-        var map: [String: Any] = [:]
-        if enableTensor {
-          guard let imageCrop = FaceHelper.getImageFaceFromBuffer(from: frame.buffer, rectImage: face.frame, orientation: orientation) else {
-            return nil
-          }
-          guard let pixelBuffer = FaceHelper.uiImageToPixelBuffer(image: imageCrop, size: inputWidth) else {
-            return nil
-          }
-          guard let rgbData = FaceHelper.rgbDataFromBuffer(pixelBuffer) else {
-            return nil
-          }
-
-          try interpreter?.copy(rgbData, toInputAt: 0)
-          try interpreter?.invoke()
-          let outputTensor: Tensor? = try interpreter?.output(at: 0)
-
-          if ((outputTensor?.data) != nil) {
-            let result: [Float] = [Float32](unsafeData: outputTensor!.data) ?? []
-            map["data"] = result
-          } else {
-            map["data"] = []
-          }
-        } else {
-          map["data"] = []
-        }
-        if runLandmarks {
-          map["landmarks"] = processLandmarks(
-            from: face,
-            scaleX: scaleX,
-            scaleY: scaleY
-          )
-        }
-        if runClassifications {
-          map["leftEyeOpenProbability"] = face.leftEyeOpenProbability
-          map["rightEyeOpenProbability"] = face.rightEyeOpenProbability
-          map["smilingProbability"] = face.smilingProbability
-        }
-        if runContours {
-          map["contours"] = processFaceContours(
-            from: face,
-            scaleX: scaleX,
-            scaleY: scaleY
-          )
-        }
-        if trackingEnabled {
-          map["trackingId"] = face.trackingID
-        }
-        map["rollAngle"] = face.headEulerAngleZ
-        map["pitchAngle"] = face.headEulerAngleX
-        map["yawAngle"] = face.headEulerAngleY
-        map["bounds"] = processBoundingBox(
-          from: face,
-          sourceWidth: width,
-          sourceHeight: height,
-          orientation: frame.orientation,
-          scaleX: scaleX,
-          scaleY: scaleY
-        )
-        result.append(map)
-      }
-    } catch let error {
-      print("Error processing face detection: \(error)")
+        if trackingEnabled {
+          map["trackingId"] = face.trackingID
         }
-    return result
+
+        map["rollAngle"] = face.headEulerAngleZ
+        map["pitchAngle"] = face.headEulerAngleX
+        map["yawAngle"] = face.headEulerAngleY
+        map["bounds"] = processBoundingBox(
+          from: face,
+          sourceWidth: width,
+          sourceHeight: height,
+          scaleX: scaleX,
+          scaleY: scaleY
+        )
+
+        result.append(map)
+      }
+    } catch let error {
+      print("Error processing face detection: \(error)")
     }
+
+    return result
+  }
 }
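Net effect of the iOS changes: scaling no longer reads screen bounds behind an `autoScale` flag; the caller now supplies explicit `windowWidth`/`windowHeight` values, an `autoMode` switch, and a `cameraFacing` hint for mirroring, and orientation comes from the new `VisionCameraFaceDetectorOrientation` manager instead of `frame.orientation`. A hypothetical options dictionary covering the keys the 2.2.1 `init` reads; in an app these values arrive from the JS side of react-native-vision-camera rather than being built by hand:

```swift
import Foundation

// Hypothetical plugin options; the key names are the ones the 2.2.1 init reads.
let options: [AnyHashable: Any] = [
  "performanceMode": "accurate",
  "landmarkMode": "all",
  "classificationMode": "all",
  "contourMode": "all",
  "minFaceSize": 0.25,
  "trackingEnabled": true,
  "enableTensor": false,
  // New in 2.2.1, replacing 2.2.0's "autoScale"/screen-bounds scaling:
  "autoMode": true,
  "windowWidth": 390.0,
  "windowHeight": 844.0,
  "cameraFacing": "back",
]

// The same key normalization getConfig applies before init reads the values:
let config = Dictionary(uniqueKeysWithValues: options.map { key, value in
  (key as? String ?? "", value)
})
print(config["autoMode"] as? Bool == true)  // true
```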