remote-calibrator 0.3.0 → 0.5.0-beta.3
- package/CHANGELOG.md +12 -0
- package/README.md +29 -19
- package/homepage/example.js +9 -3
- package/i18n/fetch-languages-sheets.js +5 -4
- package/lib/RemoteCalibrator.min.js +1 -1
- package/lib/RemoteCalibrator.min.js.LICENSE.txt +1 -1
- package/lib/RemoteCalibrator.min.js.map +1 -1
- package/package.json +15 -15
- package/src/WebGazer4RC/.gitattributes +10 -0
- package/src/WebGazer4RC/LICENSE.md +15 -0
- package/src/WebGazer4RC/README.md +142 -0
- package/src/WebGazer4RC/gnu-lgpl-v3.0.md +163 -0
- package/src/WebGazer4RC/gplv3.md +636 -0
- package/src/WebGazer4RC/package-lock.json +1133 -0
- package/src/WebGazer4RC/package.json +28 -0
- package/src/WebGazer4RC/src/dom_util.mjs +27 -0
- package/src/WebGazer4RC/src/facemesh.mjs +150 -0
- package/src/WebGazer4RC/src/index.mjs +1235 -0
- package/src/WebGazer4RC/src/mat.mjs +301 -0
- package/src/WebGazer4RC/src/params.mjs +29 -0
- package/src/WebGazer4RC/src/pupil.mjs +109 -0
- package/src/WebGazer4RC/src/ridgeReg.mjs +104 -0
- package/src/WebGazer4RC/src/ridgeRegThreaded.mjs +161 -0
- package/src/WebGazer4RC/src/ridgeWeightedReg.mjs +125 -0
- package/src/WebGazer4RC/src/ridgeWorker.mjs +135 -0
- package/src/WebGazer4RC/src/util.mjs +348 -0
- package/src/WebGazer4RC/src/util_regression.mjs +240 -0
- package/src/WebGazer4RC/src/worker_scripts/mat.js +306 -0
- package/src/WebGazer4RC/src/worker_scripts/util.js +398 -0
- package/src/WebGazer4RC/test/regression_test.js +182 -0
- package/src/WebGazer4RC/test/run_tests_and_server.sh +24 -0
- package/src/WebGazer4RC/test/util_test.js +60 -0
- package/src/WebGazer4RC/test/webgazerExtract_test.js +40 -0
- package/src/WebGazer4RC/test/webgazer_test.js +160 -0
- package/src/WebGazer4RC/test/www_page_test.js +41 -0
- package/src/const.js +3 -0
- package/src/core.js +8 -0
- package/src/css/distance.scss +40 -0
- package/src/css/panel.scss +32 -1
- package/src/distance/distance.js +4 -4
- package/src/distance/distanceCheck.js +115 -0
- package/src/distance/distanceTrack.js +99 -41
- package/src/{interpupillaryDistance.js → distance/interPupillaryDistance.js} +14 -12
- package/src/gaze/gazeTracker.js +16 -1
- package/src/i18n.js +1 -1
- package/src/index.js +2 -1
- package/src/panel.js +32 -3
- package/webpack.config.js +4 -4
package/src/WebGazer4RC/src/index.mjs
@@ -0,0 +1,1235 @@
+/* eslint-disable */
+// Search LOOP for the loop function.
+
+import '@tensorflow/tfjs';
+//import(/* webpackPreload: true */ '@tensorflow/tfjs');
+//import(/* webpackChunkName: 'pageA' */ './vendors~main.js')
+
+import 'regression';
+import params from './params.mjs';
+import './dom_util.mjs';
+import localforage from 'localforage';
+import TFFaceMesh from './facemesh.mjs';
+import Reg from './ridgeReg.mjs';
+import ridgeRegWeighted from './ridgeWeightedReg.mjs';
+import ridgeRegThreaded from './ridgeRegThreaded.mjs';
+import util from './util.mjs';
+
+const webgazer = {};
+webgazer.tracker = {};
+webgazer.tracker.TFFaceMesh = TFFaceMesh;
+webgazer.reg = Reg;
+webgazer.reg.RidgeWeightedReg = ridgeRegWeighted.RidgeWeightedReg;
+webgazer.reg.RidgeRegThreaded = ridgeRegThreaded.RidgeRegThreaded;
+webgazer.util = util;
+webgazer.params = params;
+
+//PRIVATE VARIABLES
+
+//video elements
+var videoStream = null;
+var videoContainerElement = null;
+var videoElement = null;
+var videoElementCanvas = null;
+var faceOverlay = null;
+var faceFeedbackBox = null;
+var gazeDot = null;
+// Why is this not in webgazer.params ?
+var debugVideoLoc = '';
+
+/*
+ * Initialises variables used to store accuracy eigenValues
+ * This is used by the calibration example file
+ */
+var xPast50 = new Array(50);
+var yPast50 = new Array(50);
+
+// loop parameters
+var clockStart = performance.now();
+var latestEyeFeatures = null;
+var latestGazeData = null;
+/* -------------------------------------------------------------------------- */
+webgazer.params.paused = false;
+
+webgazer.params.greedyLearner = false;
+webgazer.params.framerate = 60;
+webgazer.params.showGazeDot = false;
+
+webgazer.params.getLatestVideoFrameTimestamp = () => {};
+/* -------------------------------------------------------------------------- */
+// registered callback for loop
+var nopCallback = function(data) {};
+var callback = nopCallback;
+
+let learning = false // Regression
+
+//Types that regression systems should handle
+//Describes the source of data so that regression systems may ignore or handle differently the various generating events
+var eventTypes = ['click', 'move'];
+
+//movelistener timeout clock parameters
+var moveClock = performance.now();
+//currently used tracker and regression models, defaults to clmtrackr and linear regression
+var curTracker = new webgazer.tracker.TFFaceMesh();
+var regs = [new webgazer.reg.RidgeReg()];
+// var blinkDetector = new webgazer.BlinkDetector();
+
+//lookup tables
+var curTrackerMap = {
+  'TFFacemesh': function() { return new webgazer.tracker.TFFaceMesh(); },
+};
+var regressionMap = {
+  'ridge': function() { return new webgazer.reg.RidgeReg(); },
+  'weightedRidge': function() { return new webgazer.reg.RidgeWeightedReg(); },
+  'threadedRidge': function() { return new webgazer.reg.RidgeRegThreaded(); },
+};
+
+//localstorage name
+var localstorageDataLabel = 'webgazerGlobalData';
+var localstorageSettingsLabel = 'webgazerGlobalSettings';
+//settings object for future storage of settings
+var settings = {};
+var data = [];
+var defaults = {
+  'data': [],
+  'settings': {}
+};
+
+
+//PRIVATE FUNCTIONS
+
+/**
+ * Computes the size of the face overlay validation box depending on the size of the video preview window.
+ * @returns {Object} The dimensions of the validation box as top, left, width, height.
+ */
+webgazer.computeValidationBoxSize = function() {
+
+  var vw = videoElement.videoWidth;
+  var vh = videoElement.videoHeight;
+  var pw = parseInt(videoElement.style.width);
+  var ph = parseInt(videoElement.style.height);
+
+  // Find the size of the box.
+  // Pick the smaller of the two video preview sizes
+  var smaller = Math.min( vw, vh );
+  var larger = Math.max( vw, vh );
+
+  // Overall scalar
+  var scalar = ( vw == larger ? pw / vw : ph / vh );
+
+  // Multiply this by 2/3, then adjust it to the size of the preview
+  var boxSize = (smaller * webgazer.params.faceFeedbackBoxRatio) * scalar;
+
+  // Set the boundaries of the face overlay validation box based on the preview
+  var topVal = (ph - boxSize)/2;
+  var leftVal = (pw - boxSize)/2;
+
+  // top, left, width, height
+  return [topVal, leftVal, boxSize, boxSize]
+}
+
+let _w, _h, _smaller, _boxSize, _topBound, _leftBound, _rightBound, _bottomBound
+let _eyeLX, _eyeLY, _eyeRX, _eyeRY
+let hasBounds = false
+let gettingBounds = false
+
+function _helper_getBounds() {
+  gettingBounds = true
+  setTimeout(() => {
+    _w = videoElement.videoWidth
+    _h = videoElement.videoHeight
+
+    // Find the size of the box.
+    // Pick the smaller of the two video preview sizes
+    _smaller = Math.min(_w, _h)
+    _boxSize = _smaller * webgazer.params.faceFeedbackBoxRatio
+
+    // Set the boundaries of the face overlay validation box based on the preview
+    _topBound = (_h - _boxSize) / 2
+    _leftBound = (_w - _boxSize) / 2
+    _rightBound = _leftBound + _boxSize
+    _bottomBound = _topBound + _boxSize
+
+    hasBounds = true
+    gettingBounds = false
+  }, 500)
+}
+
+// TODO WebGazer doesn't provide correct validation feedback
+/**
+ * Checks if the pupils are in the position box on the video
+ */
+function checkEyesInValidationBox() {
+  if (faceFeedbackBox !== null && latestEyeFeatures) {
+    if (!hasBounds && !gettingBounds) _helper_getBounds()
+
+    // Get the x and y positions of the left and right eyes
+    _eyeLX = _w - latestEyeFeatures.left.imagex
+    _eyeRX = _w - latestEyeFeatures.right.imagex
+    _eyeLY = latestEyeFeatures.left.imagey
+    _eyeRY = latestEyeFeatures.right.imagey
+
+    if (
+      hasBounds &&
+      _eyeLX > _leftBound &&
+      _eyeLX < _rightBound &&
+      _eyeRX > _leftBound &&
+      _eyeRX < _rightBound &&
+      _eyeLY > _topBound &&
+      _eyeLY < _bottomBound &&
+      _eyeRY > _topBound &&
+      _eyeRY < _bottomBound
+    ) {
+      faceFeedbackBox.style.border = 'solid gray 2px'
+    } else {
+      faceFeedbackBox.style.border = 'solid red 4px'
+    }
+  } else faceFeedbackBox.style.border = 'solid red 4px'
+}
+
+/**
+ * This draws the point (x,y) onto the canvas in the HTML
+ * @param {colour} colour - The colour of the circle to plot
+ * @param {x} x - The x co-ordinate of the desired point to plot
+ * @param {y} y - The y co-ordinate of the desired point to plot
+ */
+function drawCoordinates(colour, x, y) {
+  var ctx = document.getElementById("gaze-accuracy-canvas").getContext('2d');
+  ctx.fillStyle = colour; // Red color
+  ctx.beginPath();
+  ctx.arc(x, y, 5, 0, Math.PI * 2, true);
+  ctx.fill();
+}
+
+/**
+ * Gets the pupil features by following the pipeline which threads an eyes object through each call:
+ * curTracker gets eye patches -> blink detector -> pupil detection
+ * @param {Canvas} canvas - a canvas which will have the video drawn onto it
+ * @param {Number} width - the width of canvas
+ * @param {Number} height - the height of canvas
+ */
+function getPupilFeatures(canvas, width, height) {
+  if (!canvas) {
+    return;
+  }
+  try {
+    return curTracker.getEyePatches(canvas, width, height);
+  } catch(err) {
+    console.log("can't get pupil features ", err);
+    return null;
+  }
+}
+
+/**
+ * Gets the most current frame of video and paints it to a resized version of the canvas with width and height
+ * @param {Canvas} canvas - the canvas to paint the video on to
+ * @param {Number} width - the new width of the canvas
+ * @param {Number} height - the new height of the canvas
+ */
+function paintCurrentFrame(canvas, width, height) {
+  if (canvas.width !== width) {
+    canvas.width = width;
+  }
+  if (canvas.height !== height) {
+    canvas.height = height;
+  }
+
+  var ctx = canvas.getContext('2d');
+  ctx.drawImage(videoElement, 0, 0, canvas.width, canvas.height);
+}
+
+/**
+ * Paints the video to a canvas and runs the prediction pipeline to get a prediction
+ * @param {Number|undefined} regModelIndex - The prediction index we're looking for
+ * @returns {*}
+ */
+async function getPrediction(regModelIndex) {
+  var predictions = [];
+
+  if (regs.length === 0) {
+    console.log('regression not set, call setRegression()');
+    return null;
+  }
+  for (var reg in regs) {
+    predictions.push(regs[reg].predict(latestEyeFeatures));
+  }
+  if (regModelIndex !== undefined) {
+    return predictions[regModelIndex] === null ? null : {
+      'x' : predictions[regModelIndex].x,
+      'y' : predictions[regModelIndex].y,
+      'eyeFeatures': latestEyeFeatures
+    };
+  } else {
+    return predictions.length === 0 || predictions[0] === null ? null : {
+      'x' : predictions[0].x,
+      'y' : predictions[0].y,
+      'eyeFeatures': latestEyeFeatures,
+      'all' : predictions
+    };
+  }
+}
+
+/* -------------------------------------------------------------------------- */
+/* LOOP */
+/* -------------------------------------------------------------------------- */
+
+/**
+ * Runs every available animation frame if webgazer is not paused
+ */
+var smoothingVals = new util.DataWindow(4);
+var k = 0;
+
+let _now = null
+let _last = -1
+
+// From getting the video frame to get data
+let _oneLoopFinished = true
+
+async function gazePrep(forcedPrep = false) {
+  paintCurrentFrame(videoElementCanvas, videoElementCanvas.width, videoElementCanvas.height);
+
+  // [20200617 xk] TODO: this call should be made async somehow. will take some work.
+  if (!webgazer.params.paused || forcedPrep)
+    latestEyeFeatures = await getPupilFeatures(videoElementCanvas, videoElementCanvas.width, videoElementCanvas.height);
+
+  // Draw face overlay
+  if (webgazer.params.showFaceOverlay) {
+    // Get tracker object
+    var tracker = webgazer.getTracker();
+    faceOverlay.getContext('2d').clearRect(0, 0, videoElement.videoWidth, videoElement.videoHeight);
+    tracker.drawFaceOverlay(faceOverlay.getContext('2d'), tracker.getPositions());
+  }
+
+  // Feedback box
+  // Check that the eyes are inside of the validation box
+  if (webgazer.params.showFaceFeedbackBox) checkEyesInValidationBox();
+}
+
+async function loop() {
+  _now = window.performance.now()
+
+  if (webgazer.params.videoIsOn) {
+    // [20200617 XK] TODO: there is currently lag between the camera input and the face overlay. This behavior
+    // is not seen in the facemesh demo. probably need to optimize async implementation. I think the issue lies
+    // in the implementation of getPrediction().
+
+    // Paint the latest video frame into the canvas which will be analyzed by WebGazer
+    // [20180729 JT] Why do we need to do this? clmTracker does this itself _already_, which is just duplicating the work.
+    // Is it because other trackers need a canvas instead of an img/video element?
+    if (_oneLoopFinished) {
+      _oneLoopFinished = false
+      webgazer.params.getLatestVideoFrameTimestamp(new Date())
+    }
+    await gazePrep()
+  }
+
+  if (!webgazer.params.paused) {
+
+    if (_now - _last >= 1000. / webgazer.params.framerate) {
+      _last = _now
+
+      // Get gaze prediction (ask clm to track; pass the data to the regressor; get back a prediction)
+      latestGazeData = getPrediction();
+      // Count time
+      // var elapsedTime = performance.now() - clockStart;
+
+      latestGazeData = await latestGazeData;
+
+      // [20200623 xk] callback to function passed into setGazeListener(fn)
+      callback(latestGazeData);
+      _oneLoopFinished = true
+
+      if (latestGazeData) {
+        // [20200608 XK] Smoothing across the most recent 4 predictions, do we need this with Kalman filter?
+        smoothingVals.push(latestGazeData);
+        var x = 0;
+        var y = 0;
+        var len = smoothingVals.length;
+        for (var d in smoothingVals.data) {
+          x += smoothingVals.get(d).x;
+          y += smoothingVals.get(d).y;
+        }
+
+        var pred = util.bound({ x: x / len, y: y / len });
+
+        if (webgazer.params.storingPoints) {
+          // drawCoordinates('blue', pred.x, pred.y); //draws the previous predictions
+          // store the position of the past fifty occuring tracker preditions
+          webgazer.storePoints(pred.x, pred.y, k)
+          ++k
+          if (k == 50) k = 0
+        }
+
+        gazeDot.style.transform = `translate(${pred.x}px, ${pred.y}px)`;
+      }
+
+    }
+  } else {
+    gazeDot.style.transform = `translate(-15px, -15px)` // Move out of the display
+  }
+
+  requestAnimationFrame(loop);
+}
+
+//is problematic to test
+//because latestEyeFeatures is not set in many cases
+
+/**
+ * Records screen position data based on current pupil feature and passes it
+ * to the regression model.
+ * @param {Number} x - The x screen position
+ * @param {Number} y - The y screen position
+ * @param {String} eventType - The event type to store
+ * @returns {null}
+ */
+var recordScreenPosition = function(x, y, eventType) {
+  if (webgazer.params.paused) {
+    return;
+  }
+  if (regs.length === 0) {
+    console.log('regression not set, call setRegression()');
+    return null;
+  }
+  for (var reg in regs) {
+    if( latestEyeFeatures )
+      regs[reg].addData(latestEyeFeatures, [x, y], eventType);
+  }
+};
+
+/**
+ * Records click data and passes it to the regression model
+ * @param {Event} event - The listened event
+ */
+var clickListener = async function(event) {
+  recordScreenPosition(event.clientX, event.clientY, eventTypes[0]); // eventType[0] === 'click'
+
+  if (webgazer.params.saveDataAcrossSessions) {
+    // Each click stores the next data point into localforage.
+    await setGlobalData();
+
+    // // Debug line
+    // console.log('Model size: ' + JSON.stringify(await localforage.getItem(localstorageDataLabel)).length / 1000000 + 'MB');
+  }
+};
+
+/**
+ * Records mouse movement data and passes it to the regression model
+ * @param {Event} event - The listened event
+ */
+var moveListener = function(event) {
+  if (webgazer.params.paused) {
+    return;
+  }
+
+  var now = performance.now();
+  if (now < moveClock + webgazer.params.moveTickSize) {
+    return;
+  } else {
+    moveClock = now;
+  }
+  recordScreenPosition(event.clientX, event.clientY, eventTypes[1]); //eventType[1] === 'move'
+};
+
+/**
+ * Add event listeners for mouse click and move.
+ */
+var addMouseEventListeners = function() {
+  //third argument set to true so that we get event on 'capture' instead of 'bubbling'
+  //this prevents a client using event.stopPropagation() preventing our access to the click
+  document.addEventListener('click', clickListener, true);
+  document.addEventListener('mousemove', moveListener, true);
+};
+
+/**
+ * Remove event listeners for mouse click and move.
+ */
+var removeMouseEventListeners = function() {
+  // must set third argument to same value used in addMouseEventListeners
+  // for this to work.
+  document.removeEventListener('click', clickListener, true);
+  document.removeEventListener('mousemove', moveListener, true);
+};
+
+/**
+ * Loads the global data and passes it to the regression model
+ */
+async function loadGlobalData() {
+  // Get settings object from localforage
+  // [20200611 xk] still unsure what this does, maybe would be good for Kalman filter settings etc?
+  settings = await localforage.getItem(localstorageSettingsLabel);
+  settings = settings || defaults;
+
+  // Get click data from localforage
+  var loadData = await localforage.getItem(localstorageDataLabel);
+  loadData = loadData || defaults;
+
+  // Set global var data to newly loaded data
+  data = loadData;
+
+  // Load data into regression model(s)
+  for (var reg in regs) {
+    regs[reg].setData(loadData);
+  }
+
+  console.log("loaded stored data into regression model");
+}
+
+/**
+ * Adds data to localforage
+ */
+async function setGlobalData() {
+  // Grab data from regression model
+  var storeData = regs[0].getData() || data; // Array
+
+  // Store data into localforage
+  localforage.setItem(localstorageSettingsLabel, settings) // [20200605 XK] is 'settings' ever being used?
+  localforage.setItem(localstorageDataLabel, storeData);
+  //TODO data should probably be stored in webgazer object instead of each regression model
+  // -> requires duplication of data, but is likely easier on regression model implementors
+}
+
+/**
+ * Clears data from model and global storage
+ */
+function clearData() {
+  // Removes data from localforage
+  localforage.clear();
+
+  // Removes data from regression model
+  for (var reg in regs) {
+    regs[reg].init();
+  }
+}
+
+/**
+ * Initializes all needed dom elements and begins the loop
+ * @param {URL} stream - The video stream to use
+ */
+async function init(initMode = 'all', stream) {
+  //////////////////////////
+  // Video and video preview
+  //////////////////////////
+
+  if (!webgazer.params.videoIsOn) {
+    // used for webgazer.stopVideo() and webgazer.setCameraConstraints()
+    videoStream = stream;
+
+    // create a video element container to enable customizable placement on the page
+    videoContainerElement = document.createElement('div');
+    videoContainerElement.id = webgazer.params.videoContainerId;
+    videoContainerElement.style.display = 'block';
+    // videoContainerElement.style.visibility = webgazer.params.showVideo ? 'visible' : 'hidden';
+    videoContainerElement.style.opacity = webgazer.params.showVideo ? 0.8 : 0;
+    // videoContainerElement.style.position = 'fixed';
+    videoContainerElement.style.left = '10px';
+    videoContainerElement.style.bottom = '10px';
+    videoContainerElement.style.width = webgazer.params.videoViewerWidth + 'px';
+    videoContainerElement.style.height = webgazer.params.videoViewerHeight + 'px';
+
+    videoElement = document.createElement('video');
+    videoElement.setAttribute('playsinline', '');
+    videoElement.id = webgazer.params.videoElementId;
+    videoElement.srcObject = stream;
+    videoElement.autoplay = true;
+    videoElement.style.display = 'block';
+    // videoElement.style.visibility = webgazer.params.showVideo ? 'visible' : 'hidden';
+    videoElement.style.position = 'absolute';
+    // We set these to stop the video appearing too large when it is added for the very first time
+    videoElement.style.width = webgazer.params.videoViewerWidth + 'px';
+    videoElement.style.height = webgazer.params.videoViewerHeight + 'px';
+
+    // Canvas for drawing video to pass to clm tracker
+    videoElementCanvas = document.createElement('canvas');
+    videoElementCanvas.id = webgazer.params.videoElementCanvasId;
+    videoElementCanvas.style.display = 'block';
+    videoElementCanvas.style.opacity = 0;
+    // videoElementCanvas.style.visibility = 'hidden';
+
+    // Face overlay
+    // Shows the CLM tracking result
+    faceOverlay = document.createElement('canvas');
+    faceOverlay.id = webgazer.params.faceOverlayId;
+    faceOverlay.style.display = webgazer.params.showFaceOverlay ? 'block' : 'none';
+    faceOverlay.style.position = 'absolute';
+
+    // Mirror video feed
+    if (webgazer.params.mirrorVideo) {
+      videoElement.style.setProperty("-moz-transform", "scale(-1, 1)");
+      videoElement.style.setProperty("-webkit-transform", "scale(-1, 1)");
+      videoElement.style.setProperty("-o-transform", "scale(-1, 1)");
+      videoElement.style.setProperty("transform", "scale(-1, 1)");
+      videoElement.style.setProperty("filter", "FlipH");
+      faceOverlay.style.setProperty("-moz-transform", "scale(-1, 1)");
+      faceOverlay.style.setProperty("-webkit-transform", "scale(-1, 1)");
+      faceOverlay.style.setProperty("-o-transform", "scale(-1, 1)");
+      faceOverlay.style.setProperty("transform", "scale(-1, 1)");
+      faceOverlay.style.setProperty("filter", "FlipH");
+    }
+
+    // Feedback box
+    // Lets the user know when their face is in the middle
+    faceFeedbackBox = document.createElement('canvas');
+    faceFeedbackBox.id = webgazer.params.faceFeedbackBoxId;
+    faceFeedbackBox.style.display = webgazer.params.showFaceFeedbackBox ? 'block' : 'none';
+    faceFeedbackBox.style.border = 'solid red 4px';
+    faceFeedbackBox.style.position = 'absolute';
+
+    // Add other preview/feedback elements to the screen once the video has shown and its parameters are initialized
+    videoContainerElement.appendChild(videoElement);
+    document.body.appendChild(videoContainerElement);
+    function setupPreviewVideo(e) {
+
+      // All video preview parts have now been added, so set the size both internally and in the preview window.
+      setInternalVideoBufferSizes( videoElement.videoWidth, videoElement.videoHeight );
+      webgazer.setVideoViewerSize( webgazer.params.videoViewerWidth, webgazer.params.videoViewerHeight );
+
+      videoContainerElement.appendChild(videoElementCanvas);
+      webgazer.videoCanvas = videoElementCanvas // !
+      videoContainerElement.appendChild(faceOverlay);
+      videoContainerElement.appendChild(faceFeedbackBox);
+
+      // Run this only once, so remove the event listener
+      e.target.removeEventListener(e.type, setupPreviewVideo);
+    };
+    videoElement.addEventListener('timeupdate', setupPreviewVideo);
+  }
+
+  if (initMode != 'video') {
+    // Gaze dot
+    // Starts offscreen
+    gazeDot = document.createElement('div');
+    gazeDot.id = webgazer.params.gazeDotId;
+    gazeDot.style.display = webgazer.params.showGazeDot ? 'block' : 'none';
+    // gazeDot.style.position = 'fixed';
+    // gazeDot.style.zIndex = 99999;
+    // TODO Customizable width and height
+    gazeDot.style.width = '10px'
+    gazeDot.style.height = '10px'
+    gazeDot.style.left = '-5px';
+    gazeDot.style.top = '-5px'; // Width and height are 10px by default
+    gazeDot.style.transform = `translate(-15px, -15px)`
+    // gazeDot.style.background = 'red';
+    // gazeDot.style.borderRadius = '100%';
+    // gazeDot.style.opacity = '0.5';
+    // gazeDot.style.width = '10px';
+    // gazeDot.style.height = '10px';
+
+    document.body.appendChild(gazeDot);
+
+    addMouseEventListeners();
+
+    //BEGIN CALLBACK LOOP
+    webgazer.params.paused = false;
+    clockStart = performance.now();
+  }
+
+  await loop();
+}
+
+/**
+ * Initializes navigator.mediaDevices.getUserMedia
+ * depending on the browser capabilities
+ *
+ * @return Promise
+ */
+function setUserMediaVariable(){
+
+  if (navigator.mediaDevices === undefined) {
+    navigator.mediaDevices = {};
+  }
+
+  if (navigator.mediaDevices.getUserMedia === undefined) {
+    navigator.mediaDevices.getUserMedia = function(constraints) {
+
+      // gets the alternative old getUserMedia is possible
+      var getUserMedia = navigator.webkitGetUserMedia || navigator.mozGetUserMedia;
+
+      // set an error message if browser doesn't support getUserMedia
+      if (!getUserMedia) {
+        return Promise.reject(new Error("Unfortunately, your browser does not support access to the webcam through the getUserMedia API. Try to use the latest version of Google Chrome, Mozilla Firefox, Opera, or Microsoft Edge instead."));
+      }
+
+      // uses navigator.getUserMedia for older browsers
+      return new Promise(function(resolve, reject) {
+        getUserMedia.call(navigator, constraints, resolve, reject);
+      });
+    }
+  }
+}
+
+//PUBLIC FUNCTIONS - CONTROL
+
+/**
+ * Starts all state related to webgazer -> dataLoop, video collection, click listener
+ * If starting fails, call `onFail` param function.
+ * @param {Function} onFail - Callback to call in case it is impossible to find user camera
+ * @returns {*}
+ */
+webgazer.begin = function(onFail) {
+  // if (window.location.protocol !== 'https:' && window.location.hostname !== 'localhost' && window.chrome){
+  //   alert("WebGazer works only over https. If you are doing local development, you need to run a local server.");
+  // }
+
+  // Load model data stored in localforage.
+  // if (webgazer.params.saveDataAcrossSessions) {
+  //   loadGlobalData();
+  // }
+
+  onFail = onFail || function() {console.log('No stream')};
+
+  // if (debugVideoLoc) {
+  //   init(debugVideoLoc);
+  //   return webgazer;
+  // }
+
+  return webgazer._begin(false)
+};
+
+/**
+ * Start the video element.
+ */
+webgazer.beginVideo = function () {
+  webgazer._begin(true)
+}
+
+webgazer._begin = function (videoOnly) {
+  // SETUP VIDEO ELEMENTS
+  // Sets .mediaDevices.getUserMedia depending on browser
+  if (!webgazer.params.videoIsOn) {
+    setUserMediaVariable();
+
+    return new Promise(async (resolve, reject) => {
+      let stream;
+      try {
+        stream = await navigator.mediaDevices.getUserMedia( webgazer.params.camConstraints );
+        init(videoOnly ? 'video' : 'all', stream);
+        //
+        webgazer.params.videoIsOn = true
+        //
+        if (!videoOnly) resolve(webgazer);
+      } catch(err) {
+        console.log(err);
+        onFail();
+        videoElement = null;
+        stream = null;
+        reject(err);
+      }
+    });
+  } else {
+    // Video is ON
+    // e.g. tracking viewing distance already
+    init('gaze')
+  }
+}
+
+/**
+ * Checks if webgazer has finished initializing after calling begin()
+ * [20180729 JT] This seems like a bad idea for how this function should be implemented.
+ * @returns {boolean} if webgazer is ready
+ */
+webgazer.isReady = function() {
+  if (videoElementCanvas === null) {
+    return false;
+  }
+  return videoElementCanvas.width > 0;
+};
+
+/**
+ * Stops collection of data and predictions
+ * @returns {webgazer} this
+ */
+webgazer.pause = function() {
+  webgazer.params.paused = true;
+  return webgazer;
+};
+
+/* -------------------------------------------------------------------------- */
+
+webgazer.stopLearning = function () {
+  removeMouseEventListeners()
+  return webgazer
+}
+
+webgazer.startLearning = function () {
+  addMouseEventListeners()
+  return webgazer
+}
+
+/* -------------------------------------------------------------------------- */
+
+/**
+ * Resumes collection of data and predictions if paused
+ * @returns {webgazer} this
+ */
+webgazer.resume = async function() {
+  if (!webgazer.params.paused) {
+    return webgazer;
+  }
+  webgazer.params.paused = false;
+  _oneLoopFinished = true
+  await loop();
+  return webgazer;
+};
+
+/**
+ * stops collection of data and removes dom modifications, must call begin() to reset up
+ * @return {webgazer} this
+ */
+webgazer.end = function(endAll = false) {
+  // loop may run an extra time and fail due to removed elements
+  // webgazer.params.paused = true;
+  if (endAll) {
+    smoothingVals = new util.DataWindow(4);
+    k = 0;
+    _now = null;
+    _last = -1;
+
+    hasBounds = false
+
+    webgazer.params.videoIsOn = false
+    setTimeout(() => {
+      webgazer.stopVideo(); // uncomment if you want to stop the video from streaming
+
+      //remove video element and canvas
+      // document.body.removeChild(videoElement);
+      document.body.removeChild(videoContainerElement);
+    }, 500);
+  }
+  return webgazer;
+};
+
+/**
+ * Stops the video camera from streaming and removes the video outlines
+ * @return {webgazer} this
+ */
+webgazer.stopVideo = function() {
+  // Stops the video from streaming
+  videoStream.getTracks()[0].stop();
+
+  // Removes the outline of the face
+  // document.body.removeChild( faceOverlay );
+
+  // Removes the box around the face
+  // document.body.removeChild( faceFeedbackBox );
+
+  return webgazer;
+}
+
+
+//PUBLIC FUNCTIONS - DEBUG
+
+/**
+ * Returns if the browser is compatible with webgazer
+ * @return {boolean} if browser is compatible
+ */
+webgazer.detectCompatibility = function() {
+
+  var getUserMedia = navigator.mediaDevices.getUserMedia ||
+    navigator.getUserMedia ||
+    navigator.webkitGetUserMedia ||
+    navigator.mozGetUserMedia;
+
+  return getUserMedia !== undefined;
+};
+
+/**
+ * Set whether to show any of the video previews (camera, face overlay, feedback box).
+ * If true: visibility depends on corresponding params (default all true).
+ * If false: camera, face overlay, feedback box are all hidden
+ * @param {bool} val
+ * @return {webgazer} this
+ */
+webgazer.showVideoPreview = function(val) {
+  webgazer.params.showVideoPreview = val;
+  webgazer.showVideo(val && webgazer.params.showVideo);
+  webgazer.showFaceOverlay(val && webgazer.params.showFaceOverlay);
+  webgazer.showFaceFeedbackBox(val && webgazer.params.showFaceFeedbackBox);
+  return webgazer;
+}
+
+/**
+ * Set whether the camera video preview is visible or not (default true).
+ * @param {*} bool
+ * @return {webgazer} this
+ */
+webgazer.showVideo = function(val, opacity = 0.8) {
+  webgazer.params.showVideo = val;
+  // if (videoElement) {
+  //   videoElement.style.visibility = val ? 'visible' : 'hidden';
+  //   videoElement.style.opacity = val ? 1 : 0;
+  // }
+  if (videoContainerElement) {
+    // videoContainerElement.style.visibility = val ? 'visible' : 'hidden';
+    videoContainerElement.style.opacity = val ? opacity : 0;
+  }
+  return webgazer;
+};
+
+/**
+ * Set whether the face overlay is visible or not (default true).
+ * @param {*} bool
+ * @return {webgazer} this
+ */
+webgazer.showFaceOverlay = function(val) {
+  webgazer.params.showFaceOverlay = val;
+  if( faceOverlay ) {
+    faceOverlay.style.display = val ? 'block' : 'none';
+  }
+  return webgazer;
+};
+
+/**
+ * Set whether the face feedback box is visible or not (default true).
+ * @param {*} bool
+ * @return {webgazer} this
+ */
+webgazer.showFaceFeedbackBox = function(val) {
+
+  webgazer.params.showFaceFeedbackBox = val;
+  if( faceFeedbackBox ) {
+    faceFeedbackBox.style.display = val ? 'block' : 'none';
+  }
+  return webgazer;
+};
+
+/**
+ * Set whether the gaze prediction point(s) are visible or not.
+ * Multiple because of a trail of past dots. Default true
+ * @return {webgazer} this
+ */
+webgazer.showPredictionPoints = function(val) {
+  webgazer.params.showGazeDot = val;
+  if( gazeDot ) {
+    gazeDot.style.display = val ? 'block' : 'none';
+  }
+  return webgazer;
+};
+
+/**
+ * Set whether localprevious calibration data (from localforage) should be loaded.
+ * Default true.
+ *
+ * NOTE: Should be called before webgazer.begin() -- see www/js/main.js for example
+ *
+ * @param val
+ * @returns {webgazer} this
+ */
+webgazer.saveDataAcrossSessions = function(val) {
+  webgazer.params.saveDataAcrossSessions = val;
+  return webgazer;
+}
+
+/**
+ * Set whether a Kalman filter will be applied to gaze predictions (default true);
+ * @return {webgazer} this
+ */
+webgazer.applyKalmanFilter = function(val) {
+  webgazer.params.applyKalmanFilter = val;
+  return webgazer;
+}
+
+/**
+ * Define constraints on the video camera that is used. Useful for non-standard setups.
+ * This can be set before calling webgazer.begin(), but also mid stream.
+ *
+ * @param {Object} constraints Example constraints object:
+ * { width: { min: 320, ideal: 1280, max: 1920 }, height: { min: 240, ideal: 720, max: 1080 }, facingMode: "user" };
+ *
+ * Follows definition here:
+ * https://developer.mozilla.org/en-US/docs/Web/API/Media_Streams_API/Constraints
+ *
+ * Note: The constraints set here are applied to the video track only. They also _replace_ any constraints, so be sure to set everything you need.
+ * Warning: Setting a large video resolution will decrease performance, and may require
+ */
+webgazer.setCameraConstraints = async function(constraints) {
+  var videoTrack,videoSettings;
+  webgazer.params.camConstraints = constraints;
+
+  // If the camera stream is already up...
+  if(videoStream)
+  {
+    webgazer.pause();
+    videoTrack = videoStream.getVideoTracks()[0];
+    try {
+      await videoTrack.applyConstraints( webgazer.params.camConstraints );
+      videoSettings = videoTrack.getSettings();
+      setInternalVideoBufferSizes( videoSettings.width, videoSettings.height );
+    } catch(err) {
+      console.log( err );
+      return;
+    }
+    // Reset and recompute sizes of the video viewer.
+    // This is only to adjust the feedback box, say, if the aspect ratio of the video has changed.
+    webgazer.setVideoViewerSize( webgazer.params.videoViewerWidth, webgazer.params.videoViewerHeight )
+    webgazer.getTracker().reset();
+    await webgazer.resume();
+  }
+}
+
+
+/**
+ * Does what it says on the tin.
+ * @param {*} width
+ * @param {*} height
+ */
+function setInternalVideoBufferSizes( width, height ) {
+  // Re-set the canvas size used by the internal processes
+  if( videoElementCanvas )
+  {
+    videoElementCanvas.width = width;
+    videoElementCanvas.height = height;
+  }
+
+  // Re-set the face overlay canvas size
+  if( faceOverlay )
+  {
+    faceOverlay.width = width;
+    faceOverlay.height = height;
+  }
+}
+
+/**
+ * Set a static video file to be used instead of webcam video
+ * @param {String} videoLoc - video file location
+ * @return {webgazer} this
+ */
+webgazer.setStaticVideo = function(videoLoc) {
+  debugVideoLoc = videoLoc;
+  return webgazer;
+};
+
+/**
+ * Set the size of the video viewer
+ */
+webgazer.setVideoViewerSize = function(w, h) {
+
+  webgazer.params.videoViewerWidth = w;
+  webgazer.params.videoViewerHeight = h;
+
+  // Change the video viewer
+  videoElement.style.width = w + 'px';
+  videoElement.style.height = h + 'px';
+
+  // Change the face overlay
+  faceOverlay.style.width = w + 'px';
+  faceOverlay.style.height = h + 'px';
+
+  // Change the feedback box size
+  // Compute the boundaries of the face overlay validation box based on the video size
+  var tlwh = webgazer.computeValidationBoxSize()
+  // Assign them to the object
+  faceFeedbackBox.style.top = tlwh[0] + 'px';
+  faceFeedbackBox.style.left = tlwh[1] + 'px';
+  faceFeedbackBox.style.width = tlwh[2] + 'px';
+  faceFeedbackBox.style.height = tlwh[3] + 'px';
+};
+
+/**
+ * Add the mouse click and move listeners that add training data.
+ * @return {webgazer} this
+ */
+webgazer.addMouseEventListeners = function() {
+  addMouseEventListeners();
+  return webgazer;
+};
+
+/**
+ * Remove the mouse click and move listeners that add training data.
+ * @return {webgazer} this
+ */
+webgazer.removeMouseEventListeners = function() {
+  removeMouseEventListeners();
+  return webgazer;
+};
+
+/**
+ * Records current screen position for current pupil features.
+ * @param {String} x - position on screen in the x axis
+ * @param {String} y - position on screen in the y axis
+ * @param {String} eventType - "click" or "move", as per eventTypes
+ * @return {webgazer} this
+ */
+webgazer.recordScreenPosition = function(x, y, eventType) {
+  // give this the same weight that a click gets.
+  recordScreenPosition(x, y, eventType || eventTypes[0]);
+  return webgazer;
+};
+
+/*
+ * Stores the position of the fifty most recent tracker preditions
+ */
+webgazer.storePoints = function(x, y, k) {
+  xPast50[k] = x;
+  yPast50[k] = y;
+}
+
+//SETTERS
+/**
+ * Sets the tracking module
+ * @param {String} name - The name of the tracking module to use
+ * @return {webgazer} this
+ */
+webgazer.setTracker = function(name) {
+  if (curTrackerMap[name] === undefined) {
+    console.log('Invalid tracker selection');
+    console.log('Options are: ');
+    for (var t in curTrackerMap) {
+      console.log(t);
+    }
+    return webgazer;
+  }
+  curTracker = curTrackerMap[name]();
+  return webgazer;
+};
+
+/**
+ * Sets the regression module and clears any other regression modules
+ * @param {String} name - The name of the regression module to use
+ * @return {webgazer} this
+ */
+webgazer.setRegression = function(name) {
+  if (regressionMap[name] === undefined) {
+    console.log('Invalid regression selection');
+    console.log('Options are: ');
+    for (var reg in regressionMap) {
+      console.log(reg);
+    }
+    return webgazer;
+  }
+  data = regs[0].getData();
+  regs = [regressionMap[name]()];
+  regs[0].setData(data);
+  return webgazer;
+};
+
+/**
+ * Adds a new tracker module so that it can be used by setTracker()
+ * @param {String} name - the new name of the tracker
+ * @param {Function} constructor - the constructor of the curTracker object
+ * @return {webgazer} this
+ */
+webgazer.addTrackerModule = function(name, constructor) {
+  curTrackerMap[name] = function() {
+    return new constructor();
+  };
+};
+
+/**
+ * Adds a new regression module so that it can be used by setRegression() and addRegression()
+ * @param {String} name - the new name of the regression
+ * @param {Function} constructor - the constructor of the regression object
+ */
+webgazer.addRegressionModule = function(name, constructor) {
+  regressionMap[name] = function() {
+    return new constructor();
+  };
+};
+
+/**
+ * Adds a new regression module to the list of regression modules, seeding its data from the first regression module
+ * @param {String} name - the string name of the regression module to add
+ * @return {webgazer} this
+ */
+webgazer.addRegression = function(name) {
+  var newReg = regressionMap[name]();
+  data = regs[0].getData();
+  newReg.setData(data);
+  regs.push(newReg);
+  return webgazer;
+};
+
+/**
+ * Sets a callback to be executed on every gaze event (currently all time steps)
+ * @param {function} listener - The callback function to call (it must be like function(data, elapsedTime))
+ * - No elapsedTime needed for Toolbox
+ * @return {webgazer} this
+ */
+webgazer.setGazeListener = function(listener) {
+  callback = listener;
+  return webgazer;
+};
+
+/**
+ * Removes the callback set by setGazeListener
+ * @return {webgazer} this
+ */
+webgazer.clearGazeListener = function() {
+  callback = nopCallback;
+  return webgazer;
+};
+
+/**
+ * Set the video element canvas; useful if you want to run WebGazer on your own canvas (e.g., on any random image).
+ * @return The current video element canvas
+ */
+webgazer.setVideoElementCanvas = function(canvas) {
+  videoElementCanvas = canvas;
+  return videoElementCanvas;
+}
+
+/**
+ * Clear data from localforage and from regs
+ */
+webgazer.clearData = async function() {
+  clearData();
+}
+
+
+//GETTERS
+/**
+ * Returns the tracker currently in use
+ * @return {tracker} an object following the tracker interface
+ */
+webgazer.getTracker = function() {
+  return curTracker;
+};
+
+/**
+ * Returns the regression currently in use
+ * @return {Array.<Object>} an array of regression objects following the regression interface
+ */
+webgazer.getRegression = function() {
+  return regs;
+};
+
+/**
+ * Requests an immediate prediction
+ * @return {object} prediction data object
+ */
+webgazer.getCurrentPrediction = async function(regIndex) {
+  webgazer.params.getLatestVideoFrameTimestamp(new Date())
+  await gazePrep(true)
+  return getPrediction(regIndex);
+};
+
+/**
+ * returns the different event types that may be passed to regressions when calling regression.addData()
+ * @return {Array} array of strings where each string is an event type
+ */
+webgazer.params.getEventTypes = function() {
+  return eventTypes.slice();
+}
+
+/**
+ * Get the video element canvas that WebGazer uses internally on which to run its face tracker.
+ * @return The current video element canvas
+ */
+webgazer.getVideoElementCanvas = function() {
+  return videoElementCanvas;
+}
+
+/**
+ * @return array [a,b] where a is width ratio and b is height ratio
+ */
+webgazer.getVideoPreviewToCameraResolutionRatio = function() {
+  return [webgazer.params.videoViewerWidth / videoElement.videoWidth, webgazer.params.videoViewerHeight / videoElement.videoHeight];
+}
+
+/*
+ * Gets the fifty most recent tracker preditions
+ */
+webgazer.getStoredPoints = function() {
+  return [xPast50, yPast50];
+}
+
+export default webgazer;
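For orientation, the sketch below shows how a host page might drive the public API this new `index.mjs` exports — `detectCompatibility()`, `setGazeListener()`, `begin()`, `getCurrentPrediction()`, `stopLearning()`, and `end()`. It is an illustrative example only, not code from the diff; the import path and the surrounding page are hypothetical, and the exact wiring RemoteCalibrator uses lives in its own wrappers (see `package/src/gaze/gazeTracker.js` in the file list above).

```js
// Illustrative sketch — not part of the package diff.
// Assumes the module can be imported from this hypothetical path
// and that the page is served over HTTPS so getUserMedia is available.
import webgazer from './src/WebGazer4RC/src/index.mjs';

async function runGazeDemo() {
  if (!webgazer.detectCompatibility()) {
    console.log('getUserMedia is not available in this browser.');
    return;
  }

  // Called on every loop tick; data may be null until the regression has samples.
  webgazer.setGazeListener(data => {
    if (data) console.log('gaze estimate', data.x, data.y);
  });

  await webgazer.begin();            // opens the camera, builds the preview DOM, starts the rAF loop
  webgazer.showPredictionPoints(true);

  // One-off prediction outside the listener, e.g. during a calibration check.
  const prediction = await webgazer.getCurrentPrediction();
  if (prediction) console.log('one-off estimate', prediction.x, prediction.y);

  // Later: stop learning from clicks/moves and tear the tracker down.
  webgazer.stopLearning();
  webgazer.end(true);
}

runGazeDemo();
```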