<!doctype html>
<html lang="en">

<head>
  <meta charset="utf-8">
  <meta name="viewport" content="width=device-width, initial-scale=1">
  <title>tracking.js - face alignment with camera</title>

  <!-- here is the frame around each example - to be removed - to a fullscreen video - working on mobile too -->
  <!-- <link rel="stylesheet" href="assets/demo.css"> -->

  <!-- tracking.js core + face classifier data, then the alignment training code -->
  <script src="../build/tracking.js"></script>
  <script src="../build/data/face-min.js"></script>
  <script src="../src/alignment/training/Landmarks.js"></script>
  <script src="../src/alignment/training/Regressor.js"></script>

  <!-- dat.GUI for the live parameter panel -->
  <script src="../node_modules/dat.gui/build/dat.gui.min.js"></script>
</head>

<body>
<style>
  /* Overlay the landmark canvas exactly on top of the video element: both
     share the same origin and CSS width so drawn landmarks line up with the
     video pixels. The two rules were previously duplicated per id. */
  #videoWebcam,
  #canvasDetection {
    position: absolute;
    top: 0px;
    left: 0px;
    width: 320px;
    height: auto;
    /* NOTE(review): zoom is non-standard (not supported in Firefox) — kept
       as-is for this demo; transform: scale(3) with
       transform-origin: top left is the portable equivalent. */
    zoom: 3;
  }
</style>
<!-- muted + playsinline are required for autoplay to actually start under
     modern browser autoplay policies (especially on mobile); without them the
     demo video can stay paused and the tracker has nothing to process. -->
<video id="videoWebcam" width="368" height="288" autoplay loop muted playsinline>
  <source src="./assets/franck.mp4" type="video/mp4">
  <source src="./assets/franck.ogv" type="video/ogg">
</video>
<!-- <video id="videoWebcam" preload autoplay loop muted></video> -->
<canvas id="canvasDetection"></canvas>

<script>
// Debug overlay canvas: fixed 320x240 backing store; the CSS above scales it
// to sit exactly over the video.
var canvasDetection = document.querySelector('#canvasDetection');
canvasDetection.width = 320
canvasDetection.height = 240
var context = canvasDetection.getContext('2d');

// tracking.LBF.maxNumStages = 10

// Face detector + LBF landmark fitter from tracking.js.
var tracker = new tracking.LandmarksTracker();
tracker.setEdgesDensity(0.1);
// Finer scan settings (the original also called setInitialScale(4) /
// setStepSize(2) first, but those were immediately overridden — removed).
tracker.setInitialScale(2);
tracker.setStepSize(1);

// Live-tunable detector parameters.
var gui = new dat.GUI();
gui.add(tracker, 'edgesDensity', 0.1, 0.5).step(0.01).listen();
gui.add(tracker, 'initialScale', 1.0, 10.0).step(0.1).listen();
gui.add(tracker, 'stepSize', 0.5, 5).step(0.1).listen();

// Track the looping demo video (swap for the commented line to use the webcam).
var videoElement = document.querySelector('#videoWebcam')
tracking.track(videoElement, tracker);
// tracking.track(videoElement, tracker, { camera: true });

// Expected number of landmarks per face (not referenced elsewhere in this file).
var landmarksPerFace = 30
// Landmark index table: which slice of the per-face landmark array belongs to
// which facial feature, plus how to draw it (dot/line colour, open vs closed
// outline). Key order matters: it drives the order of the dat.GUI toggles.
// NOTE(review): mouth.last is 30, i.e. a 31st point — confirm against
// landmarksPerFace = 30 declared above.
var landmarkFeatures = {
	jaw      : { first: 0,  last: 8,  fillStyle: 'white',  closed: false },
	nose     : { first: 15, last: 18, fillStyle: 'green',  closed: true  },
	mouth    : { first: 27, last: 30, fillStyle: 'red',    closed: true  },
	eyeL     : { first: 19, last: 22, fillStyle: 'purple', closed: false },
	eyeR     : { first: 23, last: 26, fillStyle: 'purple', closed: false },
	eyeBrowL : { first: 9,  last: 11, fillStyle: 'yellow', closed: false },
	eyeBrowR : { first: 12, last: 14, fillStyle: 'yellow', closed: false },
}
//////////////////////////////////////////////////////////////////////////////
// Display parameters (all live-editable through dat.GUI)
//////////////////////////////////////////////////////////////////////////////
// NOTE(review): 'boundinBox' (sic, missing 'g') — the track handler below
// looks the property up by this exact name, so the typo is kept.
var parameters = {
	landmarkLerpFactor : 0.7,   // 1 = raw landmarks, 0 = frozen; see lerpFacesLandmarks()
	boundinBoxVisible : true,
	jawVisible : true,
	eyeBrowLVisible : true,
	eyeBrowRVisible : true,
	noseVisible : true,
	eyeLVisible : true,
	eyeRVisible : true,
	mouthVisible : true,
}
gui.add(parameters, 'landmarkLerpFactor', 0.0, 1).listen().name('Landmarks Lerp');
// boundinBoxVisible is a boolean, so dat.GUI renders a checkbox; the numeric
// range (0.0, 1) previously passed here was meaningless and has been dropped.
gui.add(parameters, 'boundinBoxVisible').listen().name('bounding box');
// One visibility checkbox per facial feature.
Object.keys(landmarkFeatures).forEach(function(featureLabel){
	gui.add(parameters, featureLabel + 'Visible').listen().name(featureLabel);
})

// Smoothed landmark positions, updated in place on every tracked frame.
var lerpedFacesLandmarks = []
// Per-frame callback: clear the overlay, then redraw the bounding box and the
// smoothed landmarks for every face found in this frame.
tracker.on('track', function(event) {
	// wipe the previous frame's overlay
	context.clearRect(0, 0, canvasDetection.width, canvasDetection.height);

	// frames with no detection carry no data
	if( event.data === undefined ) return;

	var faces = event.data.faces;
	for(var faceIndex = 0; faceIndex < faces.length; faceIndex++){
		var boundingBox = faces[faceIndex];
		var faceLandmarks = event.data.landmarks[faceIndex];

		if( parameters.boundinBoxVisible === true ){
			displayFaceLandmarksBoundingBox(boundingBox, faceIndex);
		}

		// smooth the raw landmarks into lerpedFacesLandmarks, then draw them.
		// NOTE(review): lerpedFacesLandmarks is shared across faces, so with
		// more than one detected face the smoothing blends them together.
		lerpFacesLandmarks(faceLandmarks);
		displayFaceLandmarksDot(lerpedFacesLandmarks);
	}
})
// Exponentially smooth landmark positions to reduce frame-to-frame jitter.
// newFaceLandmarks: array of [x, y] pairs for one face.
// Writes into the module-level lerpedFacesLandmarks array, per coordinate:
//   smoothed = new * lerpFactor + previous * (1 - lerpFactor)
// (The original ran two passes — seed, then lerp — with a copy-pasted "init"
// comment on the lerp pass; merged into one pass with identical results,
// since lerping a just-seeded entry yields the new value unchanged.)
function lerpFacesLandmarks(newFaceLandmarks){
	var lerpFactor = parameters.landmarkLerpFactor
	for(var i = 0; i < newFaceLandmarks.length; i++){
		// first sighting of landmark i: seed with the raw position
		if( lerpedFacesLandmarks[i] === undefined ){
			lerpedFacesLandmarks[i] = [
				newFaceLandmarks[i][0],
				newFaceLandmarks[i][1],
			]
		}
		// blend the new position with the previous smoothed one
		lerpedFacesLandmarks[i][0] = newFaceLandmarks[i][0] * lerpFactor + lerpedFacesLandmarks[i][0] * (1-lerpFactor)
		lerpedFacesLandmarks[i][1] = newFaceLandmarks[i][1] * lerpFactor + lerpedFacesLandmarks[i][1] * (1-lerpFactor)
	}
}
//////////////////////////////////////////////////////////////////////////////
// Drawing helpers
//////////////////////////////////////////////////////////////////////////////

// Draw the detection rectangle for one face, plus a small text readout
// (face index and the box's top-left corner) just to the right of the box.
function displayFaceLandmarksBoundingBox(boundingBox, faceIndex){
	// the rectangle itself
	context.strokeStyle = '#a64ceb';
	context.strokeRect(boundingBox.x, boundingBox.y, boundingBox.width, boundingBox.height);

	// text column 5px right of the box, one 11px-high line per entry
	var textX = boundingBox.x + boundingBox.width + 5;
	var textLines = [
		'idx: ' + faceIndex,
		'x: ' + boundingBox.x + 'px',
		'y: ' + boundingBox.y + 'px',
	];
	context.font = '11px Helvetica';
	context.fillStyle = "#fff";
	for(var i = 0; i < textLines.length; i++){
		context.fillText(textLines[i], textX, boundingBox.y + 11 * (i + 1));
	}
}
// Draw every landmark feature whose dat.GUI visibility checkbox is on.
function displayFaceLandmarksDot(faceLandmarks){
	var featureLabels = Object.keys(landmarkFeatures);
	for(var i = 0; i < featureLabels.length; i++){
		var featureLabel = featureLabels[i];
		// skip features hidden via their '<label>Visible' parameter
		if( parameters[featureLabel + 'Visible'] === false ) continue;
		displayFaceLandmarksFeature(faceLandmarks, featureLabel);
	}
}
// Render one facial feature: a 1px dot at each of its landmarks, then a
// polyline connecting them (returning to the first point when feature.closed).
// faceLandmarks: array of [x, y] pairs; featureLabel: key into landmarkFeatures.
function displayFaceLandmarksFeature(faceLandmarks, featureLabel){
	var feature = landmarkFeatures[featureLabel]

	// draw one small dot per landmark of this feature
	context.fillStyle = feature.fillStyle
	for(var i = feature.first; i <= feature.last; i++){
		var xy = faceLandmarks[i]
		context.beginPath();
		context.arc(xy[0], xy[1], 1, 0, 2*Math.PI);
		context.fill();
	}

	// draw the connecting polyline in the same colour
	context.strokeStyle = feature.fillStyle
	context.beginPath();
	for(var i = feature.first; i <= feature.last; i++){
		var x = faceLandmarks[i][0]
		var y = faceLandmarks[i][1]
		// start the subpath at the feature's first landmark. (The original
		// tested i === 0, which only matched the jaw; for every other feature
		// the initial lineTo on an empty path acted as a moveTo per the
		// canvas spec — same result, now made explicit.)
		if( i === feature.first ){
			context.moveTo(x, y)
		}else{
			context.lineTo(x, y)
		}
	}
	// close the outline by drawing back to the first landmark
	if( feature.closed === true ){
		context.lineTo(faceLandmarks[feature.first][0], faceLandmarks[feature.first][1])
	}

	context.stroke();
}
</script>
</body>

</html>