vision-camera-face-detection 2.2.0 → 2.2.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/VisionCameraFaceDetection.podspec +1 -1
- package/android/build.gradle +4 -3
- package/android/gradle.properties +0 -1
- package/android/src/main/java/com/visioncamerafacedetection/VisionCameraFaceDetectionPlugin.kt +100 -83
- package/android/src/main/java/com/visioncamerafacedetection/VisionCameraFaceDetectorOrientation.kt +57 -0
- package/ios/FaceHelper.swift +103 -220
- package/ios/VisionCameraFaceDetectionPlugin.swift +326 -306
- package/ios/VisionCameraFaceDetectorOrientation.swift +81 -0
- package/lib/commonjs/Camera.js +58 -34
- package/lib/commonjs/Camera.js.map +1 -1
- package/lib/commonjs/FaceDetector.js.map +1 -1
- package/lib/module/Camera.js +60 -34
- package/lib/module/Camera.js.map +1 -1
- package/lib/module/FaceDetector.js.map +1 -1
- package/lib/typescript/commonjs/src/Camera.d.ts.map +1 -1
- package/lib/typescript/commonjs/src/FaceDetector.d.ts +24 -6
- package/lib/typescript/commonjs/src/FaceDetector.d.ts.map +1 -1
- package/lib/typescript/module/src/Camera.d.ts.map +1 -1
- package/lib/typescript/module/src/FaceDetector.d.ts +24 -6
- package/lib/typescript/module/src/FaceDetector.d.ts.map +1 -1
- package/package.json +4 -4
- package/src/Camera.tsx +65 -33
- package/src/FaceDetector.ts +31 -6
package/VisionCameraFaceDetection.podspec
CHANGED

@@ -11,7 +11,7 @@ Pod::Spec.new do |s|
   s.license = package["license"]
   s.authors = package["author"]

-  s.platforms = { :ios =>
+  s.platforms = { :ios => "15.5" } # 15.5 is the minimum version for GoogleMLKit/FaceDetection 7.0.0
   s.source = { :git => "https://github.com/edritech93/vision-camera-face-detection.git", :tag => "#{s.version}" }

   s.source_files = "ios/**/*.{h,m,mm,swift}"
package/android/build.gradle
CHANGED

@@ -80,7 +80,6 @@ repositories {
 }

 def kotlin_version = getExtOrDefault("kotlinVersion")
-def tensor_version = getExtOrDefault("tensorVersion")

 dependencies {
   implementation "com.facebook.react:react-android"
@@ -89,7 +88,9 @@ dependencies {
   implementation "androidx.annotation:annotation:1.8.2"
   implementation "androidx.camera:camera-core:1.3.4"
   implementation "com.google.mlkit:face-detection:16.1.7"
-  implementation
-  implementation '
+  implementation 'com.google.ai.edge.litert:litert:1.4.0'
+  implementation 'com.google.ai.edge.litert:litert-api:1.4.0'
+  implementation 'com.google.ai.edge.litert:litert-support:1.4.0'
+  implementation 'com.google.ai.edge.litert:litert-metadata:1.4.0'
 }

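The deleted `tensor_version` property and the two removed `implementation` lines (truncated in this diff) are consistent with TensorFlow Lite artifacts being swapped for Google's LiteRT packages, LiteRT being the renamed continuation of TensorFlow Lite. As a minimal sketch, assuming a consumer wanted the same four libraries in a Gradle Kotlin DSL script (the package itself uses the Groovy DSL), the equivalent block would be:

```kotlin
// build.gradle.kts sketch: Kotlin DSL form of the LiteRT dependencies added above.
// The coordinates and the 1.4.0 version come from the diff; the surrounding
// script (plugins, module layout) is assumed for illustration.
dependencies {
    implementation("com.google.ai.edge.litert:litert:1.4.0")          // core runtime
    implementation("com.google.ai.edge.litert:litert-api:1.4.0")      // interpreter API
    implementation("com.google.ai.edge.litert:litert-support:1.4.0")  // image/tensor helpers
    implementation("com.google.ai.edge.litert:litert-metadata:1.4.0") // model metadata parsing
}
```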
package/android/src/main/java/com/visioncamerafacedetection/VisionCameraFaceDetectionPlugin.kt
CHANGED

@@ -6,6 +6,7 @@ import android.graphics.Matrix
 import android.graphics.Rect
 import android.graphics.RectF
 import android.util.Log
+import android.view.Surface
 import com.google.android.gms.tasks.Tasks
 import com.google.mlkit.vision.common.InputImage
 import com.google.mlkit.vision.common.internal.ImageConvertUtils
@@ -17,6 +18,7 @@ import com.google.mlkit.vision.face.FaceDetectorOptions
 import com.google.mlkit.vision.face.FaceLandmark
 import com.mrousavy.camera.core.FrameInvalidError
 import com.mrousavy.camera.core.types.Orientation
+import com.mrousavy.camera.core.types.Position
 import com.mrousavy.camera.frameprocessors.Frame
 import com.mrousavy.camera.frameprocessors.FrameProcessorPlugin
 import com.mrousavy.camera.frameprocessors.VisionCameraProxy
@@ -24,29 +26,26 @@ import java.nio.ByteBuffer
 import java.nio.FloatBuffer

 private const val TAG = "FaceDetector"
-
 class VisionCameraFaceDetectionPlugin(
   proxy: VisionCameraProxy,
   options: Map<String, Any>?
 ) : FrameProcessorPlugin() {
-  // device display data
-  private val displayMetrics = proxy.context.resources.displayMetrics
-  private val density = displayMetrics.density
-  private val windowWidth = (displayMetrics.widthPixels).toDouble() / density
-  private val windowHeight = (displayMetrics.heightPixels).toDouble() / density
-
   // detection props
-  private var
+  private var autoMode = false
   private var faceDetector: FaceDetector? = null
   private var runLandmarks = false
   private var runClassifications = false
   private var runContours = false
   private var trackingEnabled = false
+  private var windowWidth = 1.0
+  private var windowHeight = 1.0
+  private var cameraFacing: Position = Position.FRONT
+  private val orientationManager = VisionCameraFaceDetectorOrientation(proxy.context)
   private var enableTensor = false

   init {
     // handle auto scaling
-
+    autoMode = options?.get("autoMode").toString() == "true"

     // handle enable/disable tensor
     enableTensor = options?.get("enableTensor").toString() == "true"
@@ -56,7 +55,13 @@ class VisionCameraFaceDetectionPlugin(
     var landmarkModeValue = FaceDetectorOptions.LANDMARK_MODE_NONE
     var classificationModeValue = FaceDetectorOptions.CLASSIFICATION_MODE_NONE
     var contourModeValue = FaceDetectorOptions.CONTOUR_MODE_NONE
-
+
+    windowWidth = (options?.get("windowWidth") ?: 1.0) as Double
+    windowHeight = (options?.get("windowHeight") ?: 1.0) as Double
+
+    if (options?.get("cameraFacing").toString() == "back") {
+      cameraFacing = Position.BACK
+    }

     if (options?.get("performanceMode").toString() == "accurate") {
       performanceModeValue = FaceDetectorOptions.PERFORMANCE_MODE_ACCURATE
@@ -77,25 +82,19 @@ class VisionCameraFaceDetectionPlugin(
       contourModeValue = FaceDetectorOptions.CONTOUR_MODE_ALL
     }

-    val
-    if (
-      minFaceSizeParam != "null" &&
-      minFaceSizeParam != minFaceSize.toString()
-    ) {
-      minFaceSize = minFaceSizeParam.toFloat()
-    }
-
+    val minFaceSize: Double = (options?.get("minFaceSize") ?: 0.15) as Double
     val optionsBuilder = FaceDetectorOptions.Builder()
       .setPerformanceMode(performanceModeValue)
       .setLandmarkMode(landmarkModeValue)
       .setContourMode(contourModeValue)
       .setClassificationMode(classificationModeValue)
-      .setMinFaceSize(minFaceSize)
+      .setMinFaceSize(minFaceSize.toFloat())

     if (options?.get("trackingEnabled").toString() == "true") {
       trackingEnabled = true
       optionsBuilder.enableTracking()
     }
+
     faceDetector = FaceDetection.getClient(
       optionsBuilder.build()
     )
@@ -105,43 +104,72 @@ class VisionCameraFaceDetectionPlugin(
     boundingBox: Rect,
     sourceWidth: Double,
     sourceHeight: Double,
-    orientation: Orientation,
     scaleX: Double,
     scaleY: Double
   ): Map<String, Any> {
     val bounds: MutableMap<String, Any> = HashMap()
     val width = boundingBox.width().toDouble() * scaleX
     val height = boundingBox.height().toDouble() * scaleY
-    val x = boundingBox.left.toDouble()
-    val y = boundingBox.top.toDouble()
+    val x = boundingBox.left.toDouble()
+    val y = boundingBox.top.toDouble()

-
-
-
-
-      bounds["y"] = (-x + sourceHeight * scaleY) - height
-    }
+    bounds["width"] = width
+    bounds["height"] = height
+    bounds["x"] = x * scaleX
+    bounds["y"] = y * scaleY

-
-      // device is portrait
-      bounds["x"] = (-x + sourceWidth * scaleX) - width
-      bounds["y"] = y
-    }
+    if(!autoMode) return bounds

-
+    // using front camera
+    if(cameraFacing == Position.FRONT) {
+      when (orientationManager.orientation) {
+        // device is portrait
+        Surface.ROTATION_0 -> {
+          bounds["x"] = ((-x * scaleX) + sourceWidth * scaleX) - width
+          bounds["y"] = y * scaleY
+        }
         // device is landscape right
-
-
+        Surface.ROTATION_270 -> {
+          bounds["x"] = y * scaleX
+          bounds["y"] = x * scaleY
+        }
+        // device is upside down
+        Surface.ROTATION_180 -> {
+          bounds["x"] = x * scaleX
+          bounds["y"] = ((-y * scaleY) + sourceHeight * scaleY) - height
+        }
+        // device is landscape left
+        Surface.ROTATION_90 -> {
+          bounds["x"] = ((-y * scaleX) + sourceWidth * scaleX) - width
+          bounds["y"] = ((-x * scaleY) + sourceHeight * scaleY) - height
+        }
       }
+      return bounds
+    }

-
-
-
-
+    // using back camera
+    when (orientationManager.orientation) {
+      // device is portrait
+      Surface.ROTATION_0 -> {
+        bounds["x"] = x * scaleX
+        bounds["y"] = y * scaleY
+      }
+      // device is landscape right
+      Surface.ROTATION_270 -> {
+        bounds["x"] = y * scaleX
+        bounds["y"] = ((-x * scaleY) + sourceHeight * scaleY) - height
+      }
+      // device is upside down
+      Surface.ROTATION_180 -> {
+        bounds["x"] = ((-x * scaleX) + sourceWidth * scaleX) - width
+        bounds["y"] = ((-y * scaleY) + sourceHeight * scaleY) - height
+      }
+      // device is landscape left
+      Surface.ROTATION_90 -> {
+        bounds["x"] = ((-y * scaleX) + sourceWidth * scaleX) - width
+        bounds["y"] = x * scaleY
       }
     }
-    bounds["width"] = width
-    bounds["height"] = height
     return bounds
   }

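The rewritten `processBoundingBox` now starts from unmirrored, scaled coordinates and only remaps them when `autoMode` is on, branching on the lens direction and the live `Surface` rotation instead of the removed `orientation` parameter. The horizontal flip used in the front-camera portrait case is easiest to see in isolation; the sketch below is a standalone restatement (not package code) with one worked example:

```kotlin
// Mirroring a box across the vertical axis: the new left edge sits at the old
// right edge's distance from the frame's right border. In scaled units this is
// ((-x * scaleX) + sourceWidth * scaleX) - width, i.e. (sourceWidth - x) * scaleX - width,
// where `width` is already scaled, as in the plugin. Hypothetical helper for illustration.
fun mirroredLeft(x: Double, scaledWidth: Double, sourceWidth: Double, scaleX: Double): Double =
    (-x * scaleX) + sourceWidth * scaleX - scaledWidth

fun main() {
    // A 100-unit-wide box with its left edge at x = 40 in a 480-unit-wide frame
    // (scaleX = 1.0) mirrors to x = 480 - 40 - 100 = 340.
    check(mirroredLeft(x = 40.0, scaledWidth = 100.0, sourceWidth = 480.0, scaleX = 1.0) == 340.0)
}
```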
@@ -178,23 +206,16 @@ class VisionCameraFaceDetectionPlugin(
     for (i in faceLandmarksTypesStrings.indices) {
       val landmark = face.getLandmark(faceLandmarksTypes[i])
       val landmarkName = faceLandmarksTypesStrings[i]
-
-
-
-      )
-      if (landmark == null) {
-        Log.d(
-          TAG,
-          "Landmark '$landmarkName' is null - going next"
-        )
-        continue
-      }
+
+      if (landmark == null) continue
+
       val point = landmark.position
       val currentPointsMap: MutableMap<String, Double> = HashMap()
       currentPointsMap["x"] = point.x.toDouble() * scaleX
       currentPointsMap["y"] = point.y.toDouble() * scaleY
       faceLandmarksTypesMap[landmarkName] = currentPointsMap
     }
+
     return faceLandmarksTypesMap
   }

@@ -241,42 +262,34 @@ class VisionCameraFaceDetectionPlugin(
     for (i in faceContoursTypesStrings.indices) {
       val contour = face.getContour(faceContoursTypes[i])
       val contourName = faceContoursTypesStrings[i]
-
-
-
-      )
-      if (contour == null) {
-        Log.d(
-          TAG,
-          "Face contour '$contourName' is null - going next"
-        )
-        continue
-      }
+
+      if (contour == null) continue
+
       val points = contour.points
-      val pointsMap:
+      val pointsMap: MutableList<Map<String, Double>> = mutableListOf()
       for (j in points.indices) {
         val currentPointsMap: MutableMap<String, Double> = HashMap()
         currentPointsMap["x"] = points[j].x.toDouble() * scaleX
         currentPointsMap["y"] = points[j].y.toDouble() * scaleY
-        pointsMap
+        pointsMap.add(currentPointsMap)
       }
+
       faceContoursTypesMap[contourName] = pointsMap
     }
     return faceContoursTypesMap
   }

-  private fun
-    orientation
-  ): Int {
-    return when (orientation) {
-      // device is landscape left
-      Orientation.PORTRAIT -> 0
+  private fun getImageOrientation(): Int {
+    return when (orientationManager.orientation) {
       // device is portrait
-
+      Surface.ROTATION_0 -> if(cameraFacing == Position.FRONT) 270 else 90
       // device is landscape right
-
-      // device is upside
-
+      Surface.ROTATION_270 -> if(cameraFacing == Position.FRONT) 180 else 180
+      // device is upside down
+      Surface.ROTATION_180 -> if(cameraFacing == Position.FRONT) 90 else 270
+      // device is landscape left
+      Surface.ROTATION_90 -> if(cameraFacing == Position.FRONT) 0 else 0
+      else -> 0
     }
   }

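`getImageOrientation` replaces the old `Orientation`-enum mapping with one driven by the orientation manager, returning the rotation-degrees value ML Kit's `InputImage.fromMediaImage` expects (0, 90, 180, or 270: the clockwise rotation needed to upright the image). A standalone restatement (illustration only, with the `Surface` constants inlined so it runs off-device) makes the front/back asymmetry explicit:

```kotlin
// Illustrative restatement of getImageOrientation()'s mapping; not package code.
// Surface rotation constants inlined: ROTATION_0 = 0, ROTATION_90 = 1,
// ROTATION_180 = 2, ROTATION_270 = 3.
fun imageRotationDegrees(surfaceRotation: Int, frontCamera: Boolean): Int =
    when (surfaceRotation) {
        0 -> if (frontCamera) 270 else 90  // portrait
        3 -> 180                           // landscape right: same for both lenses
        2 -> if (frontCamera) 90 else 270  // upside down
        1 -> 0                             // landscape left: same for both lenses
        else -> 0
    }

fun main() {
    check(imageRotationDegrees(0, frontCamera = true) == 270)
    check(imageRotationDegrees(2, frontCamera = false) == 270)
}
```

Note that the released code spells the symmetric cases as `if(...) 180 else 180` and `if(...) 0 else 0`; both branches are identical, so the condition is redundant but harmless.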
@@ -285,19 +298,19 @@ class VisionCameraFaceDetectionPlugin(
     params: Map<String, Any>?
   ): Any {
     val result = ArrayList<Map<String, Any>>()
+
     try {
-      val
-      val image = InputImage.fromMediaImage(frame.image, orientation)
+      val image = InputImage.fromMediaImage(frame.image, getImageOrientation())
       // we need to invert sizes as frame is always -90deg rotated
       val width = image.height.toDouble()
       val height = image.width.toDouble()
-      val scaleX = if
-      val scaleY = if
+      val scaleX = if(autoMode) windowWidth / width else 1.0
+      val scaleY = if(autoMode) windowHeight / height else 1.0
       val task = faceDetector!!.process(image)
       val faces = Tasks.await(task)
-      faces.forEach
+      faces.forEach{face ->
         val map: MutableMap<String, Any> = HashMap()
-        val arrayData: MutableList<
+        val arrayData: MutableList<Double> = ArrayList()
         if (enableTensor) {
           val bmpFrameResult = ImageConvertUtils.getInstance().getUpRightBitmap(image)
           val bmpFaceResult =
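Because the frame arrives -90° rotated (per the comment in the hunk above), the code swaps `image.width` and `image.height` before computing the `autoMode` scale factors that map sensor pixels to the JS-supplied window size. A small worked example, with assumed numbers:

```kotlin
// Assumed numbers, for illustration only: a 1280x720 sensor frame shown in a
// 360x640 window. Width and height are swapped before computing the window
// scale, exactly as in the hunk above.
fun main() {
    val imageWidth = 1280.0              // as ML Kit reports it
    val imageHeight = 720.0
    val width = imageHeight              // swapped: 720
    val height = imageWidth              // swapped: 1280
    val windowWidth = 360.0              // from the `windowWidth` option
    val windowHeight = 640.0             // from the `windowHeight` option
    println(windowWidth / width)         // scaleX = 0.5
    println(windowHeight / height)       // scaleY = 0.5
}
```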
@@ -333,11 +346,13 @@ class VisionCameraFaceDetectionPlugin(
           scaleY
         )
       }
+
       if (runClassifications) {
         map["leftEyeOpenProbability"] = face.leftEyeOpenProbability?.toDouble() ?: -1
         map["rightEyeOpenProbability"] = face.rightEyeOpenProbability?.toDouble() ?: -1
         map["smilingProbability"] = face.smilingProbability?.toDouble() ?: -1
       }
+
       if (runContours) {
         map["contours"] = processFaceContours(
           face,
@@ -345,9 +360,11 @@ class VisionCameraFaceDetectionPlugin(
           scaleY
         )
       }
+
       if (trackingEnabled) {
         map["trackingId"] = face.trackingId ?: -1
       }
+
       map["rollAngle"] = face.headEulerAngleZ.toDouble()
       map["pitchAngle"] = face.headEulerAngleX.toDouble()
       map["yawAngle"] = face.headEulerAngleY.toDouble()
@@ -355,7 +372,6 @@ class VisionCameraFaceDetectionPlugin(
         face.boundingBox,
         width,
         height,
-        frame.orientation,
         scaleX,
         scaleY
       )
@@ -366,6 +382,7 @@ class VisionCameraFaceDetectionPlugin(
     } catch (e: FrameInvalidError) {
       Log.e(TAG, "Frame invalid error: ", e)
     }
+
     return result
   }
 }
package/android/src/main/java/com/visioncamerafacedetection/VisionCameraFaceDetectorOrientation.kt
ADDED

@@ -0,0 +1,57 @@
+package com.visioncamerafacedetection
+
+import android.content.Context
+import android.util.Log
+import android.view.OrientationEventListener
+import android.view.Surface
+
+private const val TAG = "FaceDetectorOrientation"
+class VisionCameraFaceDetectorOrientation(private val context: Context) {
+  var orientation = Surface.ROTATION_0
+  private var orientationListener: OrientationEventListener? = null
+
+  init {
+    if (orientationListener == null) {
+      Log.d(TAG, "Assigning new device orientation listener")
+      orientationListener = object : OrientationEventListener(context) {
+        override fun onOrientationChanged(rotationDegrees: Int) {
+          orientation = degreesToSurfaceRotation(rotationDegrees)
+        }
+      }
+    }
+
+    orientation = Surface.ROTATION_0
+    startDeviceOrientationListener()
+  }
+
+  protected fun finalize() {
+    stopDeviceOrientationListener()
+  }
+
+  private fun startDeviceOrientationListener() {
+    stopDeviceOrientationListener()
+
+    if (
+      orientationListener != null &&
+      orientationListener!!.canDetectOrientation()
+    ) {
+      Log.d(TAG, "Enabling device orientation listener")
+      orientationListener!!.enable()
+    }
+  }
+
+  private fun stopDeviceOrientationListener() {
+    if (orientationListener != null) {
+      Log.d(TAG, "Disabled device orientation listener")
+      orientationListener!!.disable()
+    }
+  }
+
+  private fun degreesToSurfaceRotation(degrees: Int): Int =
+    when (degrees) {
+      in 45..135 -> Surface.ROTATION_270
+      in 135..225 -> Surface.ROTATION_180
+      in 225..315 -> Surface.ROTATION_90
+      else -> Surface.ROTATION_0
+    }
+}
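One subtlety in the new listener worth calling out: `degreesToSurfaceRotation` uses overlapping ranges (135 and 225 each appear in two branches), and Kotlin's `when` takes the first matching branch, so each boundary resolves to the earlier bucket. A standalone check (constants inlined, illustration only):

```kotlin
// Inlined restatement of degreesToSurfaceRotation for off-device testing:
// ROTATION_0 = 0, ROTATION_90 = 1, ROTATION_180 = 2, ROTATION_270 = 3.
fun bucket(degrees: Int): Int = when (degrees) {
    in 45..135 -> 3   // Surface.ROTATION_270
    in 135..225 -> 2  // Surface.ROTATION_180 (135 never reaches here: first match wins)
    in 225..315 -> 1  // Surface.ROTATION_90  (likewise 225 resolves to ROTATION_180)
    else -> 0         // Surface.ROTATION_0 covers 316..359, 0..44, and off-range
                      // values such as OrientationEventListener.ORIENTATION_UNKNOWN (-1)
}

fun main() {
    check(bucket(135) == 3)  // boundary resolves to the earlier branch
    check(bucket(225) == 2)
    check(bucket(10) == 0)
}
```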