punchtheclock

shijing 2022-01-12 10:33:37 +08:00
parent da1bbf36c8
commit 46048efa86
114 changed files with 112063 additions and 238 deletions

Binary file not shown (image, 6.2 KiB).

@@ -0,0 +1,3 @@
{
"directory": "../"
}

@@ -0,0 +1,14 @@
# editorconfig.org
root = true

[*]
indent_style = tab
end_of_line = lf
charset = utf-8
trim_trailing_whitespace = true
insert_final_newline = true
indent_style = space
indent_size = 2

[*.md]
trim_trailing_whitespace = false

@@ -0,0 +1,3 @@
.DS_Store
node_modules
test/assets/benchmark.json

@@ -0,0 +1,30 @@
{
"asi": false,
"bitwise": false,
"curly": true,
"eqeqeq": true,
"esnext": true,
"evil": false,
"forin": false,
"globals": {
"document": true,
"navigator": true,
"tracking": true,
"window": true
},
"immed": true,
"indent": 2,
"lastsemic": false,
"maxdepth": false,
"multistr": false,
"newcap": true,
"noarg": true,
"node": true,
"onevar": false,
"quotmark": "single",
"regexp": true,
"smarttabs": true,
"trailing": true,
"undef": true,
"unused": true
}

@@ -0,0 +1,4 @@
language: node_js
node_js:
- "0.11"
- "0.10"

@@ -0,0 +1,30 @@
Software License Agreement (BSD License)

Copyright (c) 2014, Eduardo A. Lundgren Melo.
All rights reserved.

Redistribution and use of this software in source and binary forms, with or without modification, are
permitted provided that the following conditions are met:

* Redistributions of source code must retain the above
  copyright notice, this list of conditions and the
  following disclaimer.

* Redistributions in binary form must reproduce the above
  copyright notice, this list of conditions and the
  following disclaimer in the documentation and/or other
  materials provided with the distribution.

* The name of Eduardo A. Lundgren Melo may not be used to endorse or promote products
  derived from this software without specific prior
  written permission of Eduardo A. Lundgren Melo.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

@@ -0,0 +1,100 @@
![Banner](banner.svg)

<div align="center">

:point_right: **https://github.com/eduardolundgren/tracking.js/issues/395** :point_left:

</div>

---

# tracking.js

[![Build Status](http://img.shields.io/travis/eduardolundgren/tracking.js.svg?style=flat)](https://travis-ci.org/eduardolundgren/tracking.js)
[![DevDependencies Status](http://img.shields.io/david/dev/eduardolundgren/tracking.js.svg?style=flat)](https://david-dm.org/eduardolundgren/tracking.js#info=devDependencies)

The tracking.js library brings different computer vision algorithms and techniques into the browser environment. By using modern HTML5 specifications, we enable you to do real-time color tracking, face detection and much more — all that with a lightweight core (~7 KB) and an intuitive interface.

* [Official website](http://trackingjs.com)
* [Documentation](http://trackingjs.com/docs.html)
* [API Docs](http://trackingjs.com/api/)

## Install

Install via [Bower](http://bower.io/), [npm](https://www.npmjs.com/), or [download as a zip](https://github.com/eduardolundgren/tracking.js/archive/master.zip):

```
bower install tracking
```

```
npm install tracking
```
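
A minimal usage sketch (assuming the page loads `build/tracking-min.js` and contains a `<video id="video">` element); it mirrors the color-tracking examples shipped with the library:

```js
// Track magenta, cyan and yellow regions in the camera stream and log them.
var colors = new tracking.ColorTracker(['magenta', 'cyan', 'yellow']);

colors.on('track', function(event) {
  event.data.forEach(function(rect) {
    console.log(rect.color, rect.x, rect.y, rect.width, rect.height);
  });
});

tracking.track('#video', colors, { camera: true });
```
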
## Examples

[![Demo 1](https://cloud.githubusercontent.com/assets/398893/3709347/ec72876c-1453-11e4-8450-149d06d487f2.jpg)](http://trackingjs.com/examples/face_tag_friends.html)
[![Demo 2](https://cloud.githubusercontent.com/assets/398893/3709357/1a1c2e16-1454-11e4-804d-e6ada6c65997.jpg)](http://trackingjs.com/examples/face_fish_tank.html)
[![Demo 3](https://cloud.githubusercontent.com/assets/398893/3709361/38f86e8a-1454-11e4-811d-52bd21b37e85.jpg)](http://trackingjs.com/examples/color_hexgl.html)
[![Demo 4](https://cloud.githubusercontent.com/assets/398893/3709464/5447a302-1456-11e4-96b2-d2fae28e2a01.jpg)](http://trackingjs.com/examples/color_draw_something.html)
[![Demo 5](https://cloud.githubusercontent.com/assets/398893/3709469/6a3e859a-1456-11e4-982a-d46a55890e1e.jpg)](http://trackingjs.com/examples/color_fish_tank.html)

## Features

* [Trackers](http://trackingjs.com/docs.html#trackers)
  * [Color Tracker](http://trackingjs.com/docs.html#color-tracker)
  * [Object Tracker](http://trackingjs.com/docs.html#object-tracker)
* [Utilities](http://trackingjs.com/docs.html#utilities)
  * [Feature Detection (Fast)](http://trackingjs.com/docs.html#feature-detection)
  * [Feature Descriptor (Brief)](http://trackingjs.com/docs.html#feature-descriptor)
  * [Convolution](http://trackingjs.com/docs.html#convolution)
  * [Gray Scale](http://trackingjs.com/docs.html#gray-scale)
  * [Image Blur](http://trackingjs.com/docs.html#image-blur)
  * [Integral Image](http://trackingjs.com/docs.html#integral-image)
  * [Sobel](http://trackingjs.com/docs.html#sobel)
  * [Viola Jones](http://trackingjs.com/docs.html#viola-jones)
* [Web Components](http://trackingjs.com/docs.html#web-components)
  * [Color Element](http://trackingjs.com/docs.html#color-element)
  * [Object Element](http://trackingjs.com/docs.html#object-element)

## Browser Support

You can plug *tracking.js* into some well supported HTML elements such as `<canvas>`, `<video>` and `<img>`.

![IE](https://cloud.githubusercontent.com/assets/398893/3528325/20373e76-078e-11e4-8e3a-1cb86cf506f0.png) | ![Chrome](https://cloud.githubusercontent.com/assets/398893/3528328/23bc7bc4-078e-11e4-8752-ba2809bf5cce.png) | ![Firefox](https://cloud.githubusercontent.com/assets/398893/3528329/26283ab0-078e-11e4-84d4-db2cf1009953.png) | ![Opera](https://cloud.githubusercontent.com/assets/398893/3528330/27ec9fa8-078e-11e4-95cb-709fd11dac16.png) | ![Safari](https://cloud.githubusercontent.com/assets/398893/3528331/29df8618-078e-11e4-8e3e-ed8ac738693f.png)
--- | --- | --- | --- | --- |
IE 9+ ✔ | Latest ✔ | Latest ✔ | Latest ✔ | Latest ✔ |

However, the browser support may vary if you request the user's camera (which relies on [getUserMedia API](http://caniuse.com/#feat=stream)).
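
A quick capability check along these lines (a sketch; older engines expose prefixed variants) can guard the camera examples:

```js
// Detect camera support: the modern promise-based API plus legacy prefixes.
var hasCamera = !!(navigator.mediaDevices && navigator.mediaDevices.getUserMedia) ||
    !!(navigator.getUserMedia || navigator.webkitGetUserMedia ||
       navigator.mozGetUserMedia || navigator.msGetUserMedia);

if (!hasCamera) {
  console.warn('getUserMedia is unavailable; the camera demos will not run.');
}
```
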
## Roadmap

- [ ] Optical flow
- [ ] Face recognition
- [ ] Pose estimation
- [ ] Faster keypoint descriptor (BRIEF)
- [ ] More trainings (hand, car plate, etc.)

## Contributing

1. Fork it!
2. Create your feature branch: `git checkout -b my-new-feature`
3. Commit your changes: `git commit -m 'Add some feature'`
4. Push to the branch: `git push origin my-new-feature`
5. Submit a pull request :D

## History

For the detailed changelog, check [Releases](https://github.com/eduardolundgren/tracking.js/releases).

## Team

*tracking.js* is maintained by these people and a bunch of awesome [contributors](https://github.com/eduardolundgren/tracking.js/graphs/contributors).

[![Eduardo Lundgren](https://2.gravatar.com/avatar/42327de520e674a6d1686845b30778d0)](https://github.com/eduardolundgren) | [![Thiago Rocha](https://2.gravatar.com/avatar/09c627c62a26a770200819a41a71a3eb)](https://github.com/thiago-rocha) | [![Zeno Rocha](https://2.gravatar.com/avatar/e190023b66e2b8aa73a842b106920c93)](https://github.com/zenorocha) | [![Pablo Carvalho](https://2.gravatar.com/avatar/ae10d2692a6adbf051c6d4255e222df8)](https://github.com/pablocp) | [![Maira Bello](https://2.gravatar.com/avatar/97e0e62c9c02badba4c321f7613e6acf)](https://github.com/mairatma) | [![Jerome Etienne](https://2.gravatar.com/avatar/b381880f9f81065247ba9a0b7ff68358)](https://github.com/jeromeetienne)
--- | --- | --- | --- | --- | ---
[Eduardo Lundgren](https://github.com/eduardolundgren) | [Thiago Rocha](https://github.com/thiago-rocha) | [Zeno Rocha](https://github.com/zenorocha) | [Pablo Carvalho](https://github.com/pablocp) | [Maira Bello](https://github.com/mairatma) | [Jerome Etienne](https://github.com/jeromeetienne)

## License

[BSD License](https://github.com/eduardolundgren/tracking.js/blob/master/LICENSE.md) © Eduardo Lundgren

@@ -0,0 +1,37 @@
### Face tracking

- DONE display line with the face
- impressive speed and accuracy from @clmtrackr - https://github.com/auduno/clmtrackr
  - http://blog.dlib.net/2014/08/real-time-face-pose-estimation.html
  - dlib implementation
- PerspectiveCamera.setViewOffset - TODO: the effect in https://www.youtube.com/watch?v=LEPvUfC7wh8
- support for web workers?
  - it consumes a lot of CPU
- try blur on the source image
- get a video from the internet to use as an example
- DONE do lerp on output (see the sketch after these notes)

---

### Misc

- fix image source
- handle proper versioning
  - master is the latest stable
  - stable is tagged in the GitHub repo
  - dev is 'next-stable'
- Tracking.Image without a destination buffer forces reallocation
  - allow the caller to provide a destination; if not present, allocate one
- three.js is r67 in the examples - current three.js is r86
  - TODO: port to current three.js
- some examples are not running well - list which ones
  - the webcam one?
- some examples are unclear - no instructions
  - provide info in the color tracking example on how to run it
  - TODO: list which ones
- add more interactive examples - stuff I can try with a webcam
- merge lots of good PRs
  - https://github.com/eduardolundgren/tracking.js/pull/229 - add support for Safari 11
  - https://github.com/eduardolundgren/tracking.js/pull/144 - Regressing Local Binary Features, more detail on face detection
  - https://github.com/eduardolundgren/tracking.js/pull/164 - a convertor from haarcascade to tracking.js array
  - https://github.com/eduardolundgren/tracking.js/pull/131 <- merge or close
- `gulp test` fails in the benchmarks
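
A minimal sketch of the output lerp mentioned in the notes above (the landmarks demo later in this commit applies the same blend in `lerpFacesLandmarks`):

```js
// Blend a new tracker sample toward the previous one to smooth jitter.
// factor = 1 follows the tracker exactly; smaller values smooth more.
function lerp(previous, next, factor) {
  return next * factor + previous * (1 - factor);
}

// e.g. smooth a tracked rectangle's x position between frames:
// smoothedX = lerp(smoothedX, rect.x, 0.7);
```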

@@ -0,0 +1,101 @@
<!DOCTYPE HTML>
<html lang="en-US">
<head>
<meta charset="UTF-8">
<title></title>
<script type="text/javascript" src="opencv_haarcascade_frontalface_alt.js"></script>
<script type="text/javascript" src="opencv_haarcascade_eye.js"></script>
<script type="text/javascript" src="opencv_haarcascade_upper_body.js"></script>
<script type="text/javascript" src="opencv_haarcascade_mouth.js"></script>
</head>
<body>
<script>
// [
// [
// -1, // index
// 0.8226894140243530, // stage threshold
// // tree
// [
// // node 1
// [
// 3, 7, 14, 4, -1, // rect1
// 3, 9, 14, 2, 2, // rect 2
// 4.0141958743333817e-003, // node threshold
// 0.0337941907346249, // left
// 0.8378106951713562 // right
// ],
// // node 2
// [
// 3, 7, 14, 4, -1,
// 3, 9, 14, 2, 2,
// 4.0141958743333817e-003,
// 0.0337941907346249,
// 0.8378106951713562
// ]
// ]
// ]
// ];
var toFloat = function(v) {
// return parseFloat(parseFloat(v).toFixed(1));
return parseFloat(v);
},
toInt = function(v) {
return parseInt(v, 10);
},
convert = function(haarcascade) {
var stages = [],
hstages = haarcascade.stages,
i,
j;
for (i = 0; i < hstages.length; i++) {
var stage = [],
trees = [],
hstage = hstages[i],
htrees = hstage.trees,
parent = toInt(hstage.parent),
stageThreshold = toFloat(hstage.stage_threshold);
for (j = 0; j < htrees.length; j++) {
var node = [],
hnode = htrees[j][0],
hnodeThreshold = toFloat(hnode.threshold),
hnodeLeft = toFloat(hnode.left_val),
hnodeRight = toFloat(hnode.right_val),
hnodeRects = hnode.feature.rects,
hr,
r;
for (r = 0; r < hnodeRects.length; r++) {
hr = hnodeRects[r].split(" ").map(toFloat);
node = node.concat(hr);
}
node.push(hnodeThreshold, hnodeLeft, hnodeRight);
trees.push(node);
}
stage.push(parent, stageThreshold, trees);
stages.push(stage);
}
console.log(stages);
return JSON.stringify(stages);
};
// output
// var json = convert(opencv_haarcascade_frontalface_alt);
// var json = convert(opencv_haarcascade_eye);
// var json = convert(opencv_haarcascade_upper_body);
var json = convert(opencv_haarcascade_mouth);
console.log(json);
</script>
</body>
</html>
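
A hypothetical follow-up sketch (not part of the converter page): tracking.js keeps its cascade data on `tracking.ViolaJones.classifiers`, so the printed array could back an `ObjectTracker` once saved and, if necessary, flattened into the numeric layout the detector expects. The `mouthCascade` name below is illustrative.

```js
// Hypothetical: assumes the converter output was saved as a flat numeric
// array named mouthCascade and that tracking-min.js is loaded.
tracking.ViolaJones.classifiers.mouth = new Float64Array(mouthCascade);

var tracker = new tracking.ObjectTracker('mouth');
tracker.on('track', function(event) {
  event.data.forEach(function(rect) {
    console.log('mouth at', rect.x, rect.y, rect.width, rect.height);
  });
});
tracking.track('#video', tracker, { camera: true });
```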

File diff suppressed because it is too large.

File diff suppressed because it is too large.

File diff suppressed because it is too large.

File diff suppressed because it is too large.

@@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" width="888px" height="443px" viewBox="0 0 888 443" version="1.1" style="baseline-shift: 0px; clip-rule: nonzero; color: rgb(0, 0, 0); color-interpolation: srgb; color-interpolation-filters: linearrgb; color-rendering: auto; cursor: auto; direction: ltr; fill: rgb(0, 0, 0); fill-opacity: 1; fill-rule: nonzero; font: 400 16px / 18.4px Nunito, sans-serif; image-rendering: auto; letter-spacing: normal; marker: none; overflow: hidden; paint-order: normal; pointer-events: auto; shape-rendering: auto; stroke: none; stroke-dasharray: none; stroke-dashoffset: 0px; stroke-linecap: butt; stroke-linejoin: miter; stroke-miterlimit: 4; stroke-opacity: 1; stroke-width: 1px; text-anchor: start; text-decoration: none solid rgb(0, 0, 0); text-rendering: auto; visibility: visible; word-spacing: 0px; writing-mode: horizontal-tb;" xmlns:xlink="http://www.w3.org/1999/xlink"><g id="github-banner" stroke="none" stroke-width="1" fill="none" fill-rule="evenodd" style="baseline-shift: 0px; fill: none; fill-rule: evenodd; paint-order: normal; text-decoration: none solid rgb(0, 0, 0);"><g id="gh-title" fill="#000000" style="baseline-shift: 0px; fill: rgb(0, 0, 0); paint-order: normal; text-decoration: none solid rgb(0, 0, 0);"><text id="name" font-family="Roboto-Bold, Roboto, -apple-system, BlinkMacSystemFont, Segoe UI, Roboto, Helvetica, Arial, sans-serif, Apple Color Emoji, Segoe UI Emoji, Segoe UI Symbol" font-size="48" font-weight="bold" text-anchor="middle" style="baseline-shift: 0px; display: block; font-weight: 700; font-size: 48px; line-height: 55.2px; font-family: Roboto-Bold, Roboto, -apple-system, system-ui, &quot;Segoe UI&quot;, Roboto, Helvetica, Arial, sans-serif, &quot;Apple Color Emoji&quot;, &quot;Segoe UI Emoji&quot;, &quot;Segoe UI Symbol&quot;; paint-order: normal; text-anchor: middle; text-decoration: none solid rgb(0, 0, 0);"><tspan x="444" y="210" style="baseline-shift: 0px; paint-order: normal; text-decoration: none solid rgb(0, 0, 0);">Maintainers Wanted</tspan></text><text id="description" font-family="Roboto-Regular, Roboto, -apple-system, BlinkMacSystemFont, Segoe UI, Roboto, Helvetica, Arial, sans-serif, Apple Color Emoji, Segoe UI Emoji, Segoe UI Symbol" font-size="18" font-weight="normal" text-anchor="middle" style="baseline-shift: 0px; display: block; font-size: 18px; line-height: 20.7px; font-family: Roboto-Regular, Roboto, -apple-system, system-ui, &quot;Segoe UI&quot;, Roboto, Helvetica, Arial, sans-serif, &quot;Apple Color Emoji&quot;, &quot;Segoe UI Emoji&quot;, &quot;Segoe UI Symbol&quot;; paint-order: normal; text-anchor: middle; text-decoration: none solid rgb(0, 0, 0);"><tspan x="444" y="259" style="baseline-shift: 0px; paint-order: normal; text-decoration: none solid rgb(0, 0, 0);">We are looking for contributors to help with this project!</tspan></text></g></g></svg>

@@ -0,0 +1,22 @@
{
"name": "tracking",
"homepage": "http://trackingjs.com",
"authors": [
"Eduardo Lundgren <edu@rdo.io>"
],
"description": "Augmented Reality JavaScript Framework.",
"main": "build/tracking.js",
"keywords": [
"tracking",
"webrtc"
],
"license": "BSD",
"ignore": [
"**/.*",
"node_modules"
],
"dependencies": {
"dat-gui": "0.5.0",
"threejs": "r67"
}
}

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large.

Binary file not shown (image, 35 KiB).

Binary file not shown (image, 22 KiB).

Binary file not shown (image, 40 KiB).

Binary file not shown (image, 40 KiB).

Binary file not shown (image, 35 KiB).

Binary file not shown (image, 22 KiB).

@@ -0,0 +1,94 @@
function initGUIControllers(tracker) {
// GUI Controllers
var gui = new dat.GUI();
var trackedColors = {
custom: false
};
Object.keys(tracking.ColorTracker.knownColors_).forEach(function(color) {
trackedColors[color] = true;
});
tracker.customColor = '#000000';
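
// createCustomColor builds a matcher from the picked hex color: it compares
// each pixel's overall brightness (r + g + b) and its red/green channel
// ratios against the reference color, accepting pixels whose values fall
// within roughly +/-10% on all three measures.
// Worked example (illustrative): for #cc3333, total = 204 + 51 + 51 = 306,
// rRatio = 204/306 ~ 0.67, gRatio = 51/306 ~ 0.17.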
function createCustomColor(value) {
var components = /^#?([a-f\d]{2})([a-f\d]{2})([a-f\d]{2})$/i.exec(value);
var customColorR = parseInt(components[1], 16);
var customColorG = parseInt(components[2], 16);
var customColorB = parseInt(components[3], 16);
var colorTotal = customColorR + customColorG + customColorB;
if (colorTotal === 0) {
tracking.ColorTracker.registerColor('custom', function(r, g, b) {
return r + g + b < 10;
});
} else {
var rRatio = customColorR / colorTotal;
var gRatio = customColorG / colorTotal;
tracking.ColorTracker.registerColor('custom', function(r, g, b) {
var colorTotal2 = r + g + b;
if (colorTotal2 === 0) {
if (colorTotal < 10) {
return true;
}
return false;
}
var rRatio2 = r / colorTotal2,
gRatio2 = g / colorTotal2,
deltaColorTotal = colorTotal / colorTotal2,
deltaR = rRatio / rRatio2,
deltaG = gRatio / gRatio2;
return deltaColorTotal > 0.9 && deltaColorTotal < 1.1 &&
deltaR > 0.9 && deltaR < 1.1 &&
deltaG > 0.9 && deltaG < 1.1;
});
}
updateColors();
}
function updateColors() {
var colors = [];
for (var color in trackedColors) {
if (trackedColors[color]) {
colors.push(color);
}
}
tracker.setColors(colors);
}
var colorsFolder = gui.addFolder('Colors');
Object.keys(trackedColors).forEach(function(color) {
if (color !== 'custom') {
colorsFolder.add(trackedColors, color).onFinishChange(updateColors);
}
});
colorsFolder.add(trackedColors, 'custom').onFinishChange(function(value) {
if (value) {
this.customColorElement = colorsFolder.addColor(tracker, 'customColor').onChange(createCustomColor);
} else {
colorsFolder.remove(this.customColorElement);
}
});
var parametersFolder = gui.addFolder('Parameters');
parametersFolder.add(tracker, 'minDimension', 1, 100);
parametersFolder.add(tracker, 'minGroupSize', 1, 100);
colorsFolder.open();
parametersFolder.open();
updateColors();
}

@@ -0,0 +1,57 @@
* {
margin: 0;
padding: 0;
font-family: Helvetica, Arial, sans-serif;
}
.demo-title {
position: absolute;
width: 100%;
background: #2e2f33;
z-index: 2;
padding: .7em 0;
}
.demo-title a {
color: #fff;
border-bottom: 1px dotted #a64ceb;
text-decoration: none;
}
.demo-title p {
color: #fff;
text-align: center;
text-transform: lowercase;
font-size: 15px;
}
.demo-frame {
background: url(frame.png) no-repeat;
width: 854px;
height: 658px;
position: fixed;
top: 50%;
left: 50%;
margin: -329px 0 0 -429px;
padding: 95px 20px 45px 34px;
overflow: hidden;
-webkit-box-sizing: border-box;
-moz-box-sizing: border-box;
-ms-box-sizing: border-box;
box-sizing: border-box;
}
.demo-container {
width: 100%;
height: 530px;
position: relative;
background: #eee;
overflow: hidden;
border-bottom-right-radius: 10px;
border-bottom-left-radius: 10px;
}
.dg.ac {
z-index: 100 !important;
top: 50px !important;
}

Binary file not shown (image, 34 KiB).

Binary file not shown (image, 24 KiB).

Binary file not shown (image, 56 KiB).

Binary file not shown (image, 285 KiB).

@@ -0,0 +1,111 @@
(function() {
var FishTankRenderer = function() {};
FishTankRenderer.prototype.init = function(container) {
if (!FishTankRenderer.isWebGLEnabled()) {
throw new Error('WebGL is not enabled in your browser.');
}
var mesh, geometry;
this.spheres = [];
this.camera = new THREE.PerspectiveCamera(60, window.innerWidth / window.innerHeight, 1, 100000);
this.camera.position.z = 3200;
this.scene = new THREE.Scene();
var geometry = new THREE.SphereGeometry(100, 32, 16);
var path = 'assets/fish_tank/';
var format = '.png';
var urls = [
path + 'px' + format, path + 'nx' + format,
path + 'py' + format, path + 'ny' + format,
path + 'pz' + format, path + 'nz' + format
];
var textureCube = THREE.ImageUtils.loadTextureCube(urls);
var material = new THREE.MeshBasicMaterial({
color: 0xffffff,
envMap: textureCube
});
for (var i = 0; i < 500; i++) {
var mesh = new THREE.Mesh(geometry, material);
mesh.position.x = Math.random() * 100000 - 50000;
mesh.position.y = Math.random() * 100000 - 50000;
mesh.position.z = Math.random() * 100000 - 50000;
mesh.scale.x = mesh.scale.y = mesh.scale.z = Math.random() * 3 + 1;
this.scene.add(mesh);
this.spheres.push(mesh);
}
// Skybox
var shader = THREE.ShaderLib["cube"];
shader.uniforms["tCube"].value = textureCube;
var material = new THREE.ShaderMaterial({
fragmentShader: shader.fragmentShader,
vertexShader: shader.vertexShader,
uniforms: shader.uniforms,
side: THREE.BackSide
}),
mesh = new THREE.Mesh(new THREE.BoxGeometry(100000, 100000, 100000), material);
this.scene.add(mesh);
var width = window.innerWidth || 2;
var height = window.innerHeight || 2;
// THREE.WebGLRenderer takes a single optional options object; the canvas
// size is applied below with setSize().
this.renderer = new THREE.WebGLRenderer();
container.appendChild(this.renderer.domElement);
this.renderer.setSize(width, height);
};
FishTankRenderer.prototype.render = function(controlX, controlY) {
var timer = 0.0001 * Date.now();
this.camera.position.x += (-controlX - this.camera.position.x) * 0.05;
this.camera.position.y += (-controlY - this.camera.position.y) * 0.05;
this.camera.lookAt(this.scene.position);
for (var i = 0, il = this.spheres.length; i < il; i++) {
var sphere = this.spheres[i];
sphere.position.x += 50 * Math.cos(timer + i);
sphere.position.y += 50 * Math.sin(timer + i * 1.1);
}
this.renderer.render(this.scene, this.camera);
};
FishTankRenderer.isWebGLEnabled = function() {
try {
var canvas = document.createElement('canvas');
return !!window.WebGLRenderingContext &&
(canvas.getContext('webgl') || canvas.getContext('experimental-webgl'));
} catch (e) {
return false;
}
};
window.FishTankRenderer = FishTankRenderer;
})();
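
A minimal wiring sketch for the renderer above (assuming three.js r67 and the cube-map textures are reachable): initialize it into the page and feed `render()` the same two control values the color-tracking demo derives from the tracked rectangle, here taken from the pointer instead.

```js
var fishTank = new FishTankRenderer();
fishTank.init(document.body);

// Map pointer position (relative to the window center) to the two
// control values consumed by render().
var controlX = 0;
var controlY = 0;
document.addEventListener('mousemove', function(event) {
  controlX = event.pageX - window.innerWidth / 2;
  controlY = event.pageY - window.innerHeight / 2;
});

(function loop() {
  fishTank.render(controlX, controlY);
  window.requestAnimationFrame(loop);
}());
```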

Binary file not shown (image, 62 KiB).

Binary file not shown (image, 80 KiB).

Binary file not shown (image, 83 KiB).

Binary file not shown (image, 71 KiB).

Binary file not shown (image, 75 KiB).

Binary file not shown (image, 87 KiB).

Binary file not shown (image, 9.7 KiB).

Binary file not shown (image, 40 KiB).

@@ -0,0 +1 @@
function HSVtoRGB(h,s,v,opacity){var toHex=function(decimalValue,places){if(places==undefined||isNaN(places))places=2;var hex=new Array("0","1","2","3","4","5","6","7","8","9","A","B","C","D","E","F");var next=0;var hexidecimal="";decimalValue=Math.floor(decimalValue);while(decimalValue>0){next=decimalValue%16;decimalValue=Math.floor((decimalValue-next)/16);hexidecimal=hex[next]+hexidecimal}while(hexidecimal.length<places){hexidecimal="0"+hexidecimal}return hexidecimal};var hi=Math.floor(h/60)%6;var f=h/60-Math.floor(h/60);var p=v*(1-s);var q=v*(1-f*s);var t=v*(1-(1-f)*s);var r=v;var g=t;var b=p;switch(hi){case 1:r=q;g=v;b=p;break;case 2:r=p;g=v;b=t;break;case 3:r=p;g=q;b=v;break;case 4:r=t;g=p;b=v;break;case 5:r=v;g=p;b=q;break}if(opacity){return"rgba("+Math.round(255*r)+","+Math.round(255*g)+","+Math.round(255*b)+","+opacity+")"}else{return"#"+toHex(r*255)+toHex(g*255)+toHex(b*255)}}function hexToCanvasColor(hexColor,opacity){opacity=opacity||"1.0";hexColor=hexColor.replace("#","");var r=parseInt(hexColor.substring(0,2),16);var g=parseInt(hexColor.substring(2,4),16);var b=parseInt(hexColor.substring(4,6),16);return"rgba("+r+","+g+","+b+","+opacity+")"}function drawPoint(ctx,x,y,r,color){ctx.save();ctx.beginPath();ctx.lineWidth=1;ctx.fillStyle=hexToCanvasColor(color,1);ctx.arc(x,y,r,0,2*Math.PI,false);ctx.closePath();ctx.stroke();ctx.fill();ctx.restore()}function getControlPoints(x0,y0,x1,y1,x2,y2,t){var d01=Math.sqrt(Math.pow(x1-x0,2)+Math.pow(y1-y0,2));var d12=Math.sqrt(Math.pow(x2-x1,2)+Math.pow(y2-y1,2));var fa=t*d01/(d01+d12);var fb=t-fa;var p1x=x1+fa*(x0-x2);var p1y=y1+fa*(y0-y2);var p2x=x1-fb*(x0-x2);var p2y=y1-fb*(y0-y2);return[p1x,p1y,p2x,p2y]}function drawControlLine(ctx,x,y,px,py){ctx.save();ctx.beginPath();ctx.lineWidth=1;ctx.strokeStyle="rgba(0,0,0,0.3)";ctx.moveTo(x,y);ctx.lineTo(px,py);ctx.closePath();ctx.stroke();drawPoint(ctx,px,py,1.5,"#000000");ctx.restore()}function drawSpline(ctx,pts,t,closed){showDetails=true;ctx.lineWidth=4;ctx.save();var cp=[];var n=pts.length;if(closed){pts.push(pts[0],pts[1],pts[2],pts[3]);pts.unshift(pts[n-1]);pts.unshift(pts[n-1]);for(var i=0;i<n;i+=2){cp=cp.concat(getControlPoints(pts[i],pts[i+1],pts[i+2],pts[i+3],pts[i+4],pts[i+5],t))}cp=cp.concat(cp[0],cp[1]);for(var i=2;i<n+2;i+=2){var color=HSVtoRGB(Math.floor(240*(i-2)/(n-2)),.8,.8);if(!showDetails){color="#555555"}ctx.strokeStyle=hexToCanvasColor(color,.75);ctx.beginPath();ctx.moveTo(pts[i],pts[i+1]);ctx.bezierCurveTo(cp[2*i-2],cp[2*i-1],cp[2*i],cp[2*i+1],pts[i+2],pts[i+3]);ctx.stroke();ctx.closePath()}}else{for(var i=0;i<n-4;i+=2){cp=cp.concat(getControlPoints(pts[i],pts[i+1],pts[i+2],pts[i+3],pts[i+4],pts[i+5],t))}for(var i=2;i<pts.length-5;i+=2){var color=HSVtoRGB(Math.floor(240*(i-2)/(n-2)),.8,.8);if(!showDetails){color="#555555"}ctx.strokeStyle=hexToCanvasColor(color,.75);ctx.beginPath();ctx.moveTo(pts[i],pts[i+1]);ctx.bezierCurveTo(cp[2*i-2],cp[2*i-1],cp[2*i],cp[2*i+1],pts[i+2],pts[i+3]);ctx.stroke();ctx.closePath()}var color=HSVtoRGB(40,.4,.4);if(!showDetails){color="#555555"}ctx.strokeStyle=hexToCanvasColor(color,.75);ctx.beginPath();ctx.moveTo(pts[0],pts[1]);ctx.quadraticCurveTo(cp[0],cp[1],pts[2],pts[3]);ctx.stroke();ctx.closePath();var color=HSVtoRGB(240,.8,.8);if(!showDetails){color="#555555"}ctx.strokeStyle=hexToCanvasColor(color,.75);ctx.beginPath();ctx.moveTo(pts[n-2],pts[n-1]);ctx.quadraticCurveTo(cp[2*n-10],cp[2*n-9],pts[n-4],pts[n-3]);ctx.stroke();ctx.closePath()}ctx.restore()}

@@ -0,0 +1,31 @@
// stats.js - http://github.com/mrdoob/stats.js
var Stats=function(){var l=Date.now(),m=l,g=0,n=Infinity,o=0,h=0,p=Infinity,q=0,r=0,s=0,f=document.createElement("div");f.id="stats";f.addEventListener("mousedown",function(b){b.preventDefault();t(++s%2)},!1);f.style.cssText="width:80px;opacity:0.9;cursor:pointer";var a=document.createElement("div");a.id="fps";a.style.cssText="padding:0 0 3px 3px;text-align:left;background-color:#002";f.appendChild(a);var i=document.createElement("div");i.id="fpsText";i.style.cssText="color:#0ff;font-family:Helvetica,Arial,sans-serif;font-size:9px;font-weight:bold;line-height:15px";
i.innerHTML="FPS";a.appendChild(i);var c=document.createElement("div");c.id="fpsGraph";c.style.cssText="position:relative;width:74px;height:30px;background-color:#0ff";for(a.appendChild(c);74>c.children.length;){var j=document.createElement("span");j.style.cssText="width:1px;height:30px;float:left;background-color:#113";c.appendChild(j)}var d=document.createElement("div");d.id="ms";d.style.cssText="padding:0 0 3px 3px;text-align:left;background-color:#020;display:none";f.appendChild(d);var k=document.createElement("div");
k.id="msText";k.style.cssText="color:#0f0;font-family:Helvetica,Arial,sans-serif;font-size:9px;font-weight:bold;line-height:15px";k.innerHTML="MS";d.appendChild(k);var e=document.createElement("div");e.id="msGraph";e.style.cssText="position:relative;width:74px;height:30px;background-color:#0f0";for(d.appendChild(e);74>e.children.length;)j=document.createElement("span"),j.style.cssText="width:1px;height:30px;float:left;background-color:#131",e.appendChild(j);var t=function(b){s=b;switch(s){case 0:a.style.display=
"block";d.style.display="none";break;case 1:a.style.display="none",d.style.display="block"}};return{REVISION:11,domElement:f,setMode:t,begin:function(){l=Date.now()},end:function(){var b=Date.now();g=b-l;n=Math.min(n,g);o=Math.max(o,g);k.textContent=g+" MS ("+n+"-"+o+")";var a=Math.min(30,30-30*(g/200));e.appendChild(e.firstChild).style.height=a+"px";r++;b>m+1E3&&(h=Math.round(1E3*r/(b-m)),p=Math.min(p,h),q=Math.max(q,h),i.textContent=h+" FPS ("+p+"-"+q+")",a=Math.min(30,30-30*(h/100)),c.appendChild(c.firstChild).style.height=
a+"px",m=b,r=0);return b},update:function(){l=this.end()}}};
var stats = new Stats();
stats.setMode(2);
stats.domElement.style.position = 'absolute';
stats.domElement.style.left = '10px';
stats.domElement.style.top = '50px';
stats.domElement.style.zIndex = 100;
document.addEventListener('DOMContentLoaded', function(event) {
document.body.appendChild(stats.domElement);
});
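// Patch the trackers' emit methods below so every processed frame is counted.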
var objectEmit_ = tracking.ObjectTracker.prototype.emit;
var colorEmit_ = tracking.ColorTracker.prototype.emit;
stats.begin();
tracking.ObjectTracker.prototype.emit = function() {
stats.end();
objectEmit_.apply(this, arguments);
};
tracking.ColorTracker.prototype.emit = function() {
stats.end();
colorEmit_.apply(this, arguments);
};
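
For contrast with the emit patching above, stock stats.js brackets the measured work with `begin()`/`end()` each frame; a minimal sketch:

```js
var frameStats = new Stats();
document.body.appendChild(frameStats.domElement);

function tick() {
  frameStats.begin();
  // ... per-frame work to measure ...
  frameStats.end();
  window.requestAnimationFrame(tick);
}
window.requestAnimationFrame(tick);
```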

@@ -0,0 +1,105 @@
<!doctype html>
<html>
<head>
<meta charset="utf-8">
<title>tracking.js - feature matching</title>
<link rel="stylesheet" href="assets/demo.css">
<script src="../build/tracking-min.js"></script>
<script src="../node_modules/dat.gui/build/dat.gui.min.js"></script>
<style>
.demo-container {
background-color: black;
}
#image1, #image2 {
position: absolute;
left: -1000px;
top: -1000px;
}
#canvas {
position: absolute;
left: 50%;
top: 50%;
margin-left: -393px;
margin-top: -147px;
}
</style>
</head>
<body>
<div class="demo-title">
<p><a href="http://trackingjs.com" target="_parent">tracking.js</a> match similar feature points in two images</p>
</div>
<div class="demo-frame">
<div class="demo-container">
<img id="image1" src="assets/brief1.png" />
<img id="image2" src="assets/brief2.png" />
<canvas id="canvas" width="786" height="295"></canvas>
</div>
</div>
<script>
window.onload = function() {
var width = 393;
var height = 295;
var canvas = document.getElementById('canvas');
var context = canvas.getContext('2d');
var image1 = document.getElementById('image1');
var image2 = document.getElementById('image2');
window.descriptorLength = 256;
window.matchesShown = 30;
window.blurRadius = 3;
var doMatch = function() {
tracking.Brief.N = window.descriptorLength;
context.drawImage(image1, 0, 0, width, height);
context.drawImage(image2, width, 0, width, height);
var imageData1 = context.getImageData(0, 0, width, height);
var imageData2 = context.getImageData(width, 0, width, height);
var gray1 = tracking.Image.grayscale(tracking.Image.blur(imageData1.data, width, height, blurRadius), width, height);
var gray2 = tracking.Image.grayscale(tracking.Image.blur(imageData2.data, width, height, blurRadius), width, height);
var corners1 = tracking.Fast.findCorners(gray1, width, height);
var corners2 = tracking.Fast.findCorners(gray2, width, height);
var descriptors1 = tracking.Brief.getDescriptors(gray1, width, corners1);
var descriptors2 = tracking.Brief.getDescriptors(gray2, width, corners2);
var matches = tracking.Brief.reciprocalMatch(corners1, descriptors1, corners2, descriptors2);
matches.sort(function(a, b) {
return b.confidence - a.confidence;
});
for (var i = 0; i < Math.min(window.matchesShown, matches.length); i++) {
var color = '#' + Math.floor(Math.random()*16777215).toString(16);
context.fillStyle = color;
context.strokeStyle = color;
context.fillRect(matches[i].keypoint1[0], matches[i].keypoint1[1], 4, 4);
context.fillRect(matches[i].keypoint2[0] + width, matches[i].keypoint2[1], 4, 4);
context.beginPath();
context.moveTo(matches[i].keypoint1[0], matches[i].keypoint1[1]);
context.lineTo(matches[i].keypoint2[0] + width, matches[i].keypoint2[1]);
context.stroke();
}
};
doMatch();
var gui = new dat.GUI();
gui.add(window, 'descriptorLength', 128, 512).step(32).onChange(doMatch);
gui.add(window, 'matchesShown', 1, 100).onChange(doMatch);
gui.add(window, 'blurRadius', 1.1, 5).onChange(doMatch);
}
</script>
</body>
</html>

@@ -0,0 +1,185 @@
<!doctype html>
<html>
<head>
<title>tracking.js - bounding box with camera</title>
<meta charset="utf-8">
<link rel="stylesheet" href="assets/demo.css">
<script src="../build/tracking-min.js"></script>
<script src="../node_modules/dat.gui/build/dat.gui.min.js"></script>
<script src="assets/stats.min.js"></script>
<style>
#boundingBox {
display: none;
position: absolute;
background: white;
border: 1px dashed;
opacity: .5;
z-index: 1;
}
#video {
position: absolute;
top: -1000px;
cursor: crosshair;
}
body {
-webkit-touch-callout: none;
-webkit-user-select: none;
-khtml-user-select: none;
-moz-user-select: none;
-ms-user-select: none;
user-select: none;
}
</style>
</head>
<body>
<div class="demo-title">
<p><a href="http://trackingjs.com" target="_parent">tracking.js</a> Click and drag to select the area to be tracked</p>
</div>
<div id="boundingBox"></div>
<div class="demo-frame">
<div class="demo-container">
<video id="video" width="393" height="295" preload autoplay loop muted controls></video>
<canvas id="canvas" width="800" height="530"></canvas>
</div>
</div>
<script>
(function() {
// BoundingBoxTracker ======================================================
var BoundingBoxTracker = function() {
BoundingBoxTracker.base(this, 'constructor');
};
tracking.inherits(BoundingBoxTracker, tracking.Tracker);
BoundingBoxTracker.prototype.templateDescriptors_ = null;
BoundingBoxTracker.prototype.templateKeypoints_ = null;
BoundingBoxTracker.prototype.fastThreshold = 60;
BoundingBoxTracker.prototype.blur = 3;
BoundingBoxTracker.prototype.setTemplate = function(pixels, width, height) {
var blur = tracking.Image.blur(pixels, width, height, 3);
var grayscale = tracking.Image.grayscale(blur, width, height);
this.templateKeypoints_ = tracking.Fast.findCorners(grayscale, width, height);
this.templateDescriptors_ = tracking.Brief.getDescriptors(grayscale, width, this.templateKeypoints_);
};
BoundingBoxTracker.prototype.track = function(pixels, width, height) {
var blur = tracking.Image.blur(pixels, width, height, this.blur);
var grayscale = tracking.Image.grayscale(blur, width, height);
var keypoints = tracking.Fast.findCorners(grayscale, width, height, this.fastThreshold);
var descriptors = tracking.Brief.getDescriptors(grayscale, width, keypoints);
this.emit('track', {
data: tracking.Brief.reciprocalMatch(this.templateKeypoints_, this.templateDescriptors_, keypoints, descriptors)
});
};
// Track ===================================================================
var boundingBox = document.getElementById('boundingBox');
var boxLeft = 403;
var video = document.getElementById('video');
var canvas = document.getElementById('canvas');
var canvasRect = canvas.getBoundingClientRect();
var context = canvas.getContext('2d');
var templateImageData;
var capturing = false;
var videoHeight = 295;
var videoWidth = 393;
var tracker = new BoundingBoxTracker();
tracker.on('track', function(event) {
stats.end();
if (capturing) {
return;
}
// Sorts best matches by confidence.
event.data.sort(function(a, b) {
return b.confidence - a.confidence;
});
// Re-draws template on canvas.
context.putImageData(templateImageData, boxLeft, 0);
// Plots lines connecting matches.
for (var i = 0; i < Math.min(10, event.data.length); i++) {
var template = event.data[i].keypoint1;
var frame = event.data[i].keypoint2;
context.beginPath();
context.strokeStyle = 'magenta';
context.moveTo(frame[0], frame[1]);
context.lineTo(boxLeft + template[0], template[1]);
context.stroke();
}
});
var trackerTask = tracking.track(video, tracker, { camera: true });
// Waits for the user to accept the camera.
trackerTask.stop();
// Sync video ============================================================
function requestFrame() {
window.requestAnimationFrame(function() {
context.clearRect(0, 0, canvas.width, canvas.height);
if (video.readyState === video.HAVE_ENOUGH_DATA) {
try {
context.drawImage(video, 0, 0, videoWidth, videoHeight);
} catch (err) {}
}
requestFrame();
});
}
requestFrame();
// Bounding box drag =====================================================
var initialPoint;
var left;
var top;
var width;
var height;
canvas.addEventListener('mousedown', function(event) {
initialPoint = [event.pageX, event.pageY];
capturing = true;
});
canvas.addEventListener('mousemove', function(event) {
if (capturing) {
left = Math.min(initialPoint[0], event.pageX);
top = Math.min(initialPoint[1], event.pageY);
width = Math.max(initialPoint[0], event.pageX) - left;
height = Math.max(initialPoint[1], event.pageY) - top;
boundingBox.style.display = 'block';
boundingBox.style.left = left + 'px';
boundingBox.style.top = top + 'px';
boundingBox.style.width = width + 'px';
boundingBox.style.height = height + 'px';
}
});
document.addEventListener('mouseup', function() {
boundingBox.style.display = 'none';
setTrackerTemplate(left, top, width, height);
capturing = false;
});
function setTrackerTemplate(left, top, width, height) {
templateImageData = context.getImageData(left - canvasRect.left, top - canvasRect.top, width, height);
canvas.width = boxLeft + width;
context.putImageData(templateImageData, boxLeft, 0);
trackerTask.stop();
tracker.setTemplate(templateImageData.data, width, height);
trackerTask.run();
}
// GUI Controllers
var gui = new dat.GUI();
gui.add(tracker, 'fastThreshold', 20, 100).step(5);
gui.add(tracker, 'blur', 1.1, 5.0).step(0.1);
}());
</script>
</body>
</html>

@@ -0,0 +1,65 @@
<!doctype html>
<html>
<head>
<meta charset="utf-8">
<title>tracking.js - color with camera</title>
<link rel="stylesheet" href="assets/demo.css">
<script src="../build/tracking-min.js"></script>
<script src="../node_modules/dat.gui/build/dat.gui.min.js"></script>
<script src="assets/stats.min.js"></script>
<script src="assets/color_camera_gui.js"></script>
<style>
video, canvas {
margin-left: 100px;
margin-top: 35px;
position: absolute;
}
</style>
</head>
<body>
<div class="demo-title">
<p><a href="http://trackingjs.com" target="_parent">tracking.js</a> choose the colors you want to detect through the controls on the right</p>
</div>
<div class="demo-frame">
<div class="demo-container">
<video id="video" width="600" height="450" preload autoplay loop muted controls></video>
<canvas id="canvas" width="600" height="450"></canvas>
</div>
</div>
<script>
window.onload = function() {
var video = document.getElementById('video');
var canvas = document.getElementById('canvas');
var context = canvas.getContext('2d');
var tracker = new tracking.ColorTracker();
tracking.track('#video', tracker, { camera: true });
tracker.on('track', function(event) {
context.clearRect(0, 0, canvas.width, canvas.height);
event.data.forEach(function(rect) {
if (rect.color === 'custom') {
rect.color = tracker.customColor;
}
context.strokeStyle = rect.color;
context.strokeRect(rect.x, rect.y, rect.width, rect.height);
context.font = '11px Helvetica';
context.fillStyle = "#fff";
context.fillText('x: ' + rect.x + 'px', rect.x + rect.width + 5, rect.y + 11);
context.fillText('y: ' + rect.y + 'px', rect.x + rect.width + 5, rect.y + 22);
});
});
initGUIControllers(tracker);
};
</script>
</body>
</html>

@@ -0,0 +1,114 @@
<!doctype html>
<html>
<head>
<meta charset="utf-8">
<title>tracking.js - draw something</title>
<link rel="stylesheet" href="assets/demo.css">
<script src="../build/tracking.js"></script>
<script src="assets/splines.min.js"></script>
<script src="assets/stats.min.js"></script>
<style>
#canvas,
#video {
height: 300px;
position: absolute;
width: 400px;
padding-top: 66px;
}
.draw-frame {
background: url(assets/draw_frame.png);
width: 400px;
height: 414px;
border: 1px solid #ccc;
top: 50%;
left: 50%;
position: absolute;
margin: -207px 0 0 -200px;
}
canvas, video {
-moz-transform: scale(-1, 1);
-o-transform: scale(-1, 1);
-webkit-transform: scale(-1, 1);
filter: FlipH;
transform: scale(-1, 1);
}
</style>
</head>
<body>
<div class="demo-title">
<p><a href="http://trackingjs.com" target="_parent">tracking.js</a> use magenta color to draw and cyan to erase</p>
</div>
<div class="demo-frame">
<div class="demo-container">
<div class="draw-frame">
<video id="video" width="400" height="300" preload autoplay loop muted></video>
<canvas id="canvas" width="400" height="300"></canvas>
</div>
</div>
</div>
<script>
window.onload = function() {
var video = document.getElementById('video');
var canvas = document.getElementById('canvas');
var context = canvas.getContext('2d');
var drawSegments = [[]];
var segment = 0;
var tracker = new tracking.ColorTracker(['magenta', 'cyan']);
tracking.track('#video', tracker, { camera: true });
tracker.on('track', function(event) {
if (event.data.length === 0 && drawSegments[segment].length > 0) {
segment++;
if (!drawSegments[segment]) {
drawSegments[segment] = [];
}
}
event.data.forEach(function(rect) {
if (rect.color === 'magenta') {
draw(rect);
}
else if (rect.color === 'cyan') {
erase(rect);
}
});
});
function draw(rect) {
drawSegments[segment].push(rect.x + rect.width / 2, rect.y + rect.height / 2);
}
function erase(rect) {
context.clearRect(rect.x, rect.y, rect.width, rect.height);
}
function isInsideRect(x, y, rect) {
return rect.x <= x && x <= rect.x + rect.width &&
rect.y <= y && y <= rect.y + rect.height;
}
(function loop() {
for (var i = 0, len = drawSegments.length; i < len; i++) {
drawSpline(context, drawSegments[i], 0.5, false);
}
drawSegments = [drawSegments[drawSegments.length - 1]];
segment = 0;
requestAnimationFrame(loop);
}());
};
</script>
</body>
</html>

@@ -0,0 +1,150 @@
<!doctype html>
<html>
<head>
<meta charset="utf-8">
<title>tracking.js - color tracking fish tank</title>
<link rel="stylesheet" href="assets/demo.css">
<script src="../build/tracking-min.js"></script>
<script src="../../threejs/build/three.min.js"></script>
<style>
body {
overflow: hidden;
}
#video, #canvas {
bottom: 0;
position: absolute;
z-index: 100;
}
</style>
</head>
<body>
<div class="demo-title">
<p><a href="http://trackingjs.com" target="_parent">tracking.js</a> use a magenta colored object to control the scene</p>
</div>
<video id="video" width="320" height="240" preload autoplay loop muted></video>
<canvas id="canvas" width="320" height="240"></canvas>
<script>
var container;
var camera, scene, renderer, group, particle;
var mouseX = 0, mouseY = 0;
var video = document.getElementById('video');
var canvas = document.getElementById('canvas');
var context = canvas.getContext('2d');
var windowHalfX = window.innerWidth / 2;
var windowHalfY = window.innerHeight / 2;
init();
animate();
window.onload = function() {
var tracker = new tracking.ColorTracker();
tracker.setMinDimension(5);
tracker.setMinGroupSize(10);
tracking.track('#video', tracker, { camera: true });
tracker.on('track', onColorMove);
};
function init() {
container = document.createElement('div');
document.body.appendChild(container);
camera = new THREE.PerspectiveCamera(75, window.innerWidth / window.innerHeight, 1, 3000);
camera.position.z = 1000;
scene = new THREE.Scene();
var PI2 = Math.PI * 2;
var program = function (context) {
context.beginPath();
context.arc(0, 0, 0.5, 0, PI2, true);
context.fill();
}
group = new THREE.Object3D();
scene.add(group);
for (var i = 0; i < 1000; i++) {
var material = new THREE.SpriteCanvasMaterial({
color: Math.random() * 0x808008 + 0x808080,
program: program
});
particle = new THREE.Sprite(material);
particle.position.x = Math.random() * 2000 - 1000;
particle.position.y = Math.random() * 2000 - 1000;
particle.position.z = Math.random() * 2000 - 1000;
particle.scale.x = particle.scale.y = Math.random() * 20 + 10;
group.add(particle);
}
renderer = new THREE.CanvasRenderer();
renderer.setSize(window.innerWidth, window.innerHeight);
container.appendChild(renderer.domElement);
window.addEventListener('resize', onWindowResize, false);
}
function onWindowResize() {
windowHalfX = window.innerWidth / 2;
windowHalfY = window.innerHeight / 2;
camera.aspect = window.innerWidth / window.innerHeight;
camera.updateProjectionMatrix();
renderer.setSize(window.innerWidth, window.innerHeight);
}
function onColorMove(event) {
if (event.data.length === 0) {
return;
}
var maxRect;
var maxRectArea = 0;
event.data.forEach(function(rect) {
if (rect.width * rect.height > maxRectArea){
maxRectArea = rect.width * rect.height;
maxRect = rect;
}
});
if (maxRectArea > 0) {
var rectCenterX = maxRect.x + (maxRect.width/2);
var rectCenterY = maxRect.y + (maxRect.height/2);
mouseX = (rectCenterX - 160) * (window.innerWidth/320) * 10;
mouseY = (rectCenterY - 120) * (window.innerHeight/240) * 10;
context.clearRect(0, 0, canvas.width, canvas.height);
context.strokeStyle = maxRect.color;
context.strokeRect(maxRect.x, maxRect.y, maxRect.width, maxRect.height);
context.font = '11px Helvetica';
context.fillStyle = "#fff";
context.fillText('x: ' + maxRect.x + 'px', maxRect.x + maxRect.width + 5, maxRect.y + 11);
context.fillText('y: ' + maxRect.y + 'px', maxRect.x + maxRect.width + 5, maxRect.y + 22);
}
}
function animate() {
window.requestAnimationFrame(animate);
render();
}
function render() {
camera.position.x += (mouseX - camera.position.x) * 0.05;
camera.position.y += (- mouseY - camera.position.y) * 0.05;
camera.lookAt(scene.position);
renderer.render(scene, camera);
}
</script>
</body>
</html>

@@ -0,0 +1,60 @@
<!doctype html>
<html>
<head>
<meta charset="utf-8">
<title>tracking.js - color hello world</title>
<link rel="stylesheet" href="assets/demo.css">
<script src="../build/tracking-min.js"></script>
<style>
.rect {
width: 80px;
height: 80px;
position: absolute;
left: -1000px;
top: -1000px;
}
</style>
</head>
<body>
<div class="demo-title">
<p><a href="http://trackingjs.com" target="_parent">tracking.js</a> detect certain colors in an image</p>
</div>
<div class="demo-frame">
<div class="demo-container">
<img id="img" src="assets/psmove.png" />
</div>
</div>
<script>
window.onload = function() {
var img = document.getElementById('img');
var demoContainer = document.querySelector('.demo-container');
var tracker = new tracking.ColorTracker(['magenta', 'cyan', 'yellow']);
tracker.on('track', function(event) {
event.data.forEach(function(rect) {
window.plot(rect.x, rect.y, rect.width, rect.height, rect.color);
});
});
tracking.track('#img', tracker);
window.plot = function(x, y, w, h, color) {
var rect = document.createElement('div');
document.querySelector('.demo-container').appendChild(rect);
rect.classList.add('rect');
rect.style.border = '2px solid ' + color;
rect.style.width = w + 'px';
rect.style.height = h + 'px';
rect.style.left = (img.offsetLeft + x) + 'px';
rect.style.top = (img.offsetTop + y) + 'px';
};
};
</script>
</body>
</html>

@@ -0,0 +1,82 @@
<!doctype html>
<html>
<head>
<meta charset="utf-8">
<title>tracking.js - color with video</title>
<link rel="stylesheet" href="assets/demo.css">
<script src="../build/tracking-min.js"></script>
<script src="../node_modules/dat.gui/build/dat.gui.min.js"></script>
<script src="assets/stats.min.js"></script>
<script src="assets/color_camera_gui.js"></script>
<style>
.demo-container {
background-color: black;
}
video, canvas {
position: absolute;
}
</style>
</head>
<body>
<div class="demo-title">
<p><a href="http://trackingjs.com" target="_parent">tracking.js</a> detect certain colors in a video</p>
</div>
<div class="demo-frame">
<div class="demo-container">
<div id="rectangle"></div>
<video id="video" width="800" height="530" preload autoplay loop muted controls>
<source src="assets/minions.mp4" type="video/mp4">
<source src="assets/minions.ogv" type="video/ogg">
</video>
<canvas id="canvas" width="800" height="500"></canvas>
</div>
</div>
<script>
window.onload = function() {
var canvas = document.getElementById('canvas');
var context = canvas.getContext('2d');
tracking.ColorTracker.registerColor('purple', function(r, g, b) {
var dx = r - 120;
var dy = g - 60;
var dz = b - 210;
if ((b - g) >= 100 && (r - g) >= 60) {
return true;
}
return dx * dx + dy * dy + dz * dz < 3500;
});
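// The matcher above accepts a pixel either when it is strongly blue-dominant
// (b - g >= 100 and r - g >= 60) or when it lies within a Euclidean RGB
// distance of sqrt(3500) ~ 59 of the reference purple (120, 60, 210).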
var tracker = new tracking.ColorTracker(['yellow', 'purple']);
tracker.setMinDimension(5);
tracking.track('#video', tracker);
tracker.on('track', function(event) {
context.clearRect(0, 0, canvas.width, canvas.height);
event.data.forEach(function(rect) {
if (rect.color === 'custom') {
rect.color = tracker.customColor;
}
context.strokeStyle = rect.color;
context.strokeRect(rect.x, rect.y, rect.width, rect.height);
context.font = '11px Helvetica';
context.fillStyle = "#fff";
context.fillText('x: ' + rect.x + 'px', rect.x + rect.width + 5, rect.y + 11);
context.fillText('y: ' + rect.y + 'px', rect.x + rect.width + 5, rect.y + 22);
});
});
initGUIControllers(tracker);
};
</script>
</body>
</html>

@@ -0,0 +1,100 @@
<!doctype html>
<html>
<head>
<meta charset="utf-8">
<title>tracking.js - face alignment with images</title>
<link rel="stylesheet" href="assets/demo.css">
<script src="../build/tracking.js"></script>
<script src="../build/data/face-min.js"></script>
<script src="../src/alignment/training/Landmarks.js"></script>
<script src="../src/alignment/training/Regressor.js"></script>
<script src="../node_modules/dat.gui/build/dat.gui.min.js"></script>
<script src="assets/stats.min.js"></script>
<style>
.rect, .circle {
left: -1000px;
position: absolute;
top: -1000px;
}
.rect{
border: 2px solid #a64ceb;
}
.circle {
border-radius: 50%;
box-shadow: 0px 0px 3px rgba(0,0,0,0.3);
}
#img {
position: absolute;
top: 50%;
left: 50%;
margin: -200px 0 0 -200px;
}
</style>
</head>
<body>
<div class="demo-title">
<p><a href="http://trackingjs.com" target="_parent">tracking.js</a> align face landmarks to images</p>
</div>
<div class="demo-frame">
<div class="demo-container">
<img id="img" src="assets/emilia.jpg" />
</div>
</div>
<script>
window.onload = function() {
var img = document.getElementById('img');
var tracker = new tracking.LandmarksTracker();
tracker.setInitialScale(4);
tracker.setStepSize(2);
tracker.setEdgesDensity(0.1);
tracking.track('#img', tracker);
tracker.on('track', function(event) {
if(!event.data) return;
event.data.faces.forEach(function(rect) {
window.plot(rect.x, rect.y, rect.width, rect.height);
});
event.data.landmarks.forEach(function(landmarks) {
for(var i=0; i < landmarks.length; i++){
window.plotLandmark(landmarks[i][0], landmarks[i][1], 2, '#44ABDA');
}
});
});
window.plot = function(x, y, w, h) {
var rect = document.createElement('div');
document.querySelector('.demo-container').appendChild(rect);
rect.classList.add('rect');
rect.style.width = w + 'px';
rect.style.height = h + 'px';
rect.style.left = (img.offsetLeft + x) + 'px';
rect.style.top = (img.offsetTop + y) + 'px';
};
window.plotLandmark = function(x,y, radius, color){
var circle = document.createElement('div');
document.querySelector('.demo-container').appendChild(circle);
circle.classList.add('circle');
circle.style.backgroundColor = color;
circle.style.width = (radius*2) + 'px';
circle.style.height = (radius*2) + 'px';
circle.style.left = (img.offsetLeft + x) + 'px';
circle.style.top = (img.offsetTop + y) + 'px';
}
};
</script>
</body>
</html>

@@ -0,0 +1,84 @@
<!doctype html>
<html>
<head>
<meta charset="utf-8">
<title>tracking.js - face alignment with camera</title>
<link rel="stylesheet" href="assets/demo.css">
<script src="../build/tracking.js"></script>
<script src="../build/data/face-min.js"></script>
<script src="../src/alignment/training/Landmarks.js"></script>
<script src="../src/alignment/training/Regressor.js"></script>
<script src="../node_modules/dat.gui/build/dat.gui.min.js"></script>
<script src="assets/stats.min.js"></script>
<style>
video, canvas {
margin-left: 230px;
margin-top: 120px;
position: absolute;
}
</style>
</head>
<body>
<div class="demo-title">
<p><a href="http://trackingjs.com" target="_parent">tracking.js</a> get user's webcam and align face landmarks to detected faces</p>
</div>
<div class="demo-frame">
<div class="demo-container">
<video id="video" width="320" height="240" src="assets/franck.mp4" preload autoplay loop muted></video>
<canvas id="canvas" width="320" height="240"></canvas>
</div>
</div>
<script>
window.onload = function() {
var video = document.getElementById('video');
var canvas = document.getElementById('canvas');
var context = canvas.getContext('2d');
var tracker = new tracking.LandmarksTracker();
tracker.setInitialScale(4);
tracker.setStepSize(2);
tracker.setEdgesDensity(0.1);
tracking.track('#video', tracker);
tracker.on('track', function(event) {
context.clearRect(0,0,canvas.width, canvas.height);
if(!event.data) return;
event.data.faces.forEach(function(rect) {
context.strokeStyle = '#a64ceb';
context.strokeRect(rect.x, rect.y, rect.width, rect.height);
context.font = '11px Helvetica';
context.fillStyle = "#fff";
context.fillText('x: ' + rect.x + 'px', rect.x + rect.width + 5, rect.y + 11);
context.fillText('y: ' + rect.y + 'px', rect.x + rect.width + 5, rect.y + 22);
});
event.data.landmarks.forEach(function(landmarks) {
for(var l in landmarks){
context.beginPath();
context.fillStyle = "#fff";
context.arc(landmarks[l][0],landmarks[l][1],1,0,2*Math.PI);
context.fill();
}
});
});
var gui = new dat.GUI();
gui.add(tracker, 'edgesDensity', 0.1, 0.5).step(0.01).listen();
gui.add(tracker, 'initialScale', 1.0, 10.0).step(0.1).listen();
gui.add(tracker, 'stepSize', 1, 5).step(0.1).listen();
};
</script>
</body>
</html>

@@ -0,0 +1,229 @@
<!doctype html>
<html>
<head>
<meta charset="utf-8">
<title>tracking.js - face alignment with camera</title>
<!-- the frame around each example - to be removed in favor of a fullscreen video that works on mobile too -->
<!-- <link rel="stylesheet" href="assets/demo.css"> -->
<script src="../build/tracking.js"></script>
<script src="../build/data/face-min.js"></script>
<script src="../src/alignment/training/Landmarks.js"></script>
<script src="../src/alignment/training/Regressor.js"></script>
<script src="../node_modules/dat.gui/build/dat.gui.min.js"></script>
</head>
<body>
<style>
#videoWebcam {
position: absolute;
top: 0px;
left: 0px;
width : 320px;
height: auto;
zoom: 3;
}
#canvasDetection {
position: absolute;
top: 0px;
left: 0px;
width : 320px;
height: auto;
zoom: 3;
}
</style>
<video id="videoWebcam" width="368" height="288" autoplay loop>
<source src="./assets/franck.mp4" type="video/mp4"/>
<source src="./assets/franck.ogv" type="video/ogg"/>
</video>
<!-- <video id="videoWebcam" preload autoplay loop muted></video> -->
<canvas id="canvasDetection"></canvas>
<script>
var canvasDetection = document.querySelector('#canvasDetection');
canvasDetection.width = 320
canvasDetection.height = 240
var context = canvasDetection.getContext('2d');
// tracking.LBF.maxNumStages = 10
var tracker = new tracking.LandmarksTracker();
tracker.setEdgesDensity(0.1);
tracker.setInitialScale(2);
tracker.setStepSize(1);
var gui = new dat.GUI();
gui.add(tracker, 'edgesDensity', 0.1, 0.5).step(0.01).listen();
gui.add(tracker, 'initialScale', 1.0, 10.0).step(0.1).listen();
gui.add(tracker, 'stepSize', 0.5, 5).step(0.1).listen();
var videoElement = document.querySelector('#videoWebcam')
tracking.track(videoElement, tracker);
// tracking.track(videoElement, tracker, { camera: true });
var landmarksPerFace = 30
var landmarkFeatures = {
jaw : {
first: 0,
last: 8,
fillStyle: 'white',
closed: false,
},
nose : {
first:15,
last: 18,
fillStyle: 'green',
closed: true,
},
mouth : {
first:27,
last: 30,
fillStyle: 'red',
closed: true,
},
eyeL : {
first:19,
last: 22,
fillStyle: 'purple',
closed: false,
},
eyeR : {
first:23,
last: 26,
fillStyle: 'purple',
closed: false,
},
eyeBrowL : {
first: 9,
last: 11,
fillStyle: 'yellow',
closed: false,
},
eyeBrowR : {
first:12,
last: 14,
fillStyle: 'yellow',
closed: false,
},
}
//////////////////////////////////////////////////////////////////////////////
// Code Separator
//////////////////////////////////////////////////////////////////////////////
var parameters = {
landmarkLerpFactor : 0.7,
boundingBoxVisible : true,
jawVisible : true,
eyeBrowLVisible : true,
eyeBrowRVisible : true,
noseVisible : true,
eyeLVisible : true,
eyeRVisible : true,
mouthVisible : true,
}
gui.add(parameters, 'landmarkLerpFactor', 0.0, 1).listen().name('Landmarks Lerp');
gui.add(parameters, 'boundingBoxVisible').listen().name('bounding box');
Object.keys(landmarkFeatures).forEach(function(featureLabel){
gui.add(parameters, featureLabel + 'Visible').listen().name(featureLabel);
})
var lerpedFacesLandmarks = []
tracker.on('track', function(event) {
// clear debug canvasDetection
context.clearRect(0,0,canvasDetection.width, canvasDetection.height);
if( event.data === undefined ) return;
event.data.faces.forEach(function(boundingBox, faceIndex) {
var faceLandmarks = event.data.landmarks[faceIndex]
if( parameters.boundingBoxVisible === true ) displayFaceLandmarksBoundingBox(boundingBox, faceIndex)
// lerpFacesLandmarks
lerpFacesLandmarks(faceLandmarks)
// display each faceLandmarks
displayFaceLandmarksDot(lerpedFacesLandmarks)
});
})
function lerpFacesLandmarks(newFaceLandmarks){
// init lerpFacesLandmarks if needed
for(var i = 0; i < newFaceLandmarks.length; i++){
if( lerpedFacesLandmarks[i] !== undefined ) continue
lerpedFacesLandmarks[i] = [
newFaceLandmarks[i][0],
newFaceLandmarks[i][1],
]
}
// lerp each landmark toward its newly detected position
for(var i = 0; i < newFaceLandmarks.length; i++){
var lerpFactor = parameters.landmarkLerpFactor
lerpedFacesLandmarks[i][0] = newFaceLandmarks[i][0] * lerpFactor + lerpedFacesLandmarks[i][0] * (1-lerpFactor)
lerpedFacesLandmarks[i][1] = newFaceLandmarks[i][1] * lerpFactor + lerpedFacesLandmarks[i][1] * (1-lerpFactor)
}
}
//////////////////////////////////////////////////////////////////////////////
// Code Separator
//////////////////////////////////////////////////////////////////////////////
function displayFaceLandmarksBoundingBox(boundingBox, faceIndex){
// display the box
context.strokeStyle = '#a64ceb';
context.strokeRect(boundingBox.x, boundingBox.y, boundingBox.width, boundingBox.height);
// display the size of the box
context.font = '11px Helvetica';
context.fillStyle = "#fff";
context.fillText('idx: '+faceIndex, boundingBox.x + boundingBox.width + 5, boundingBox.y + 11);
context.fillText('x: ' + boundingBox.x + 'px', boundingBox.x + boundingBox.width + 5, boundingBox.y + 22);
context.fillText('y: ' + boundingBox.y + 'px', boundingBox.x + boundingBox.width + 5, boundingBox.y + 33);
}
function displayFaceLandmarksDot(faceLandmarks){
Object.keys(landmarkFeatures).forEach(function(featureLabel){
if( parameters[featureLabel+'Visible'] === false ) return
displayFaceLandmarksFeature(faceLandmarks, featureLabel)
})
}
function displayFaceLandmarksFeature(faceLandmarks, featureLabel){
var feature = landmarkFeatures[featureLabel]
// draw dots
context.fillStyle = feature.fillStyle
for(var i = feature.first; i <= feature.last; i++){
var xy = faceLandmarks[i]
context.beginPath();
context.arc(xy[0],xy[1],1,0,2*Math.PI);
context.fill();
}
// draw lines
context.strokeStyle = feature.fillStyle
context.beginPath();
for(var i = feature.first; i <= feature.last; i++){
var x = faceLandmarks[i][0]
var y = faceLandmarks[i][1]
if( i === feature.first ){
context.moveTo(x, y)
}else{
context.lineTo(x, y)
}
}
if( feature.closed === true ){
var x = faceLandmarks[feature.first][0]
var y = faceLandmarks[feature.first][1]
context.lineTo(x, y)
}
context.stroke();
}
</script>
</body>
</html>
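
The `landmarkLerpFactor` above is a plain exponential smoothing weight: each frame the displayed point moves a fraction of the way toward the newly detected point. A worked example (values hypothetical): with a factor of 0.7, an old x of 100 and a new detection at 110, the displayed x becomes 110 * 0.7 + 100 * 0.3 = 107. Lower factors smooth more but lag behind fast motion.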

View File

@ -0,0 +1,67 @@
<!doctype html>
<html>
<head>
<meta charset="utf-8">
<title>tracking.js - face with camera</title>
<link rel="stylesheet" href="assets/demo.css">
<script src="../build/tracking-min.js"></script>
<script src="../build/data/face-min.js"></script>
<script src="../node_modules/dat.gui/build/dat.gui.min.js"></script>
<script src="assets/stats.min.js"></script>
<style>
video, canvas {
margin-left: 230px;
margin-top: 120px;
position: absolute;
}
</style>
</head>
<body>
<div class="demo-title">
<p><a href="http://trackingjs.com" target="_parent">tracking.js</a> gets the user's webcam and detects faces</p>
</div>
<div class="demo-frame">
<div class="demo-container">
<video id="video" width="320" height="240" preload autoplay loop muted></video>
<canvas id="canvas" width="320" height="240"></canvas>
</div>
</div>
<script>
window.onload = function() {
var video = document.getElementById('video');
var canvas = document.getElementById('canvas');
var context = canvas.getContext('2d');
var tracker = new tracking.ObjectTracker('face');
tracker.setInitialScale(4);
tracker.setStepSize(2);
tracker.setEdgesDensity(0.1);
tracking.track('#video', tracker, { camera: true });
tracker.on('track', function(event) {
context.clearRect(0, 0, canvas.width, canvas.height);
event.data.forEach(function(rect) {
context.strokeStyle = '#a64ceb';
context.strokeRect(rect.x, rect.y, rect.width, rect.height);
context.font = '11px Helvetica';
context.fillStyle = "#fff";
context.fillText('x: ' + rect.x + 'px', rect.x + rect.width + 5, rect.y + 11);
context.fillText('y: ' + rect.y + 'px', rect.x + rect.width + 5, rect.y + 22);
});
});
var gui = new dat.GUI();
gui.add(tracker, 'edgesDensity', 0.1, 0.5).step(0.01);
gui.add(tracker, 'initialScale', 1.0, 10.0).step(0.1);
gui.add(tracker, 'stepSize', 1, 5).step(0.1);
};
</script>
</body>
</html>

View File

@ -0,0 +1,89 @@
<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, user-scalable=no, minimum-scale=1.0, maximum-scale=1.0">
<title>tracking.js - face tracking fish tank</title>
<link rel="stylesheet" href="assets/demo.css">
<script src="../build/tracking-min.js"></script>
<script src="../build/data/face-min.js"></script>
<script src="../../threejs/build/three.min.js"></script>
<script src="assets/fish_tank/FishTankRenderer.js"></script>
<style>
body {
overflow: hidden;
}
#video, #canvas {
bottom: 0;
position: absolute;
z-index: 100;
}
#viewport {
padding-top: 40px;
}
</style>
</head>
<body>
<div class="demo-title">
<p><a href="http://trackingjs.com" target="_parent">tracking.js</a> gets the user's webcam and detects faces to control the scene</p>
</div>
<div id="viewport">
<video id="video" width="320" height="240" preload autoplay loop muted></video>
<canvas id="canvas" width="320" height="240"></canvas>
</div>
<script>
var viewport = document.getElementById('viewport');
var canvas = document.getElementById('canvas');
var context = canvas.getContext('2d');
var fishTankRenderer = new FishTankRenderer();
fishTankRenderer.init(viewport);
var faceX = 0;
var faceY = 0;
var tracker = new tracking.ObjectTracker('face');
tracker.setInitialScale(4);
tracker.setStepSize(2);
tracking.track('#video', tracker, { camera: true });
tracker.on('track', function(event) {
var maxRectArea = 0;
var maxRect;
// clear once per frame, not once per rectangle, so every detected face stays visible
context.clearRect(0, 0, canvas.width, canvas.height);
event.data.forEach(function(rect) {
if (rect.width * rect.height > maxRectArea){
maxRectArea = rect.width * rect.height;
maxRect = rect;
}
context.strokeStyle = 'magenta';
context.strokeRect(rect.x, rect.y, rect.width, rect.height);
context.font = '11px Helvetica';
context.fillStyle = "#fff";
context.fillText('x: ' + rect.x + 'px', rect.x + rect.width + 5, rect.y + 11);
context.fillText('y: ' + rect.y + 'px', rect.x + rect.width + 5, rect.y + 22);
});
if(maxRectArea > 0) {
var rectCenterX = maxRect.x + (maxRect.width/2);
var rectCenterY = maxRect.y + (maxRect.height/2);
faceX = (rectCenterX - 160) * (window.innerWidth/320) * 50;
faceY = (rectCenterY - 120) * (window.innerHeight/240) * 50;
}
fishTankRenderer.render(faceX, faceY);
});
</script>
</body>
</html>
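
The mapping above recenters the face rectangle on the middle of the 320x240 video and scales it to scene units. A worked example (numbers hypothetical): a face centered at (200, 140) gives rectCenterX - 160 = 40 and rectCenterY - 120 = 20; with a 1280px-wide window, faceX = 40 * (1280 / 320) * 50 = 8000 scene units, so small head movements become large camera offsets in the fish tank scene.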

View File

@ -0,0 +1,68 @@
<!doctype html>
<html>
<head>
<meta charset="utf-8">
<title>tracking.js - face hello world</title>
<link rel="stylesheet" href="assets/demo.css">
<script src="../build/tracking-min.js"></script>
<script src="../build/data/face-min.js"></script>
<script src="../build/data/eye-min.js"></script>
<script src="../build/data/mouth-min.js"></script>
<style>
.rect {
border: 2px solid #a64ceb;
left: -1000px;
position: absolute;
top: -1000px;
}
#img {
position: absolute;
top: 50%;
left: 50%;
margin: -173px 0 0 -300px;
}
</style>
</head>
<body>
<div class="demo-title">
<p><a href="http://trackingjs.com" target="_parent">tracking.js</a> detects faces, eyes and mouths in an image</p>
</div>
<div class="demo-frame">
<div class="demo-container">
<img id="img" src="assets/faces.jpg" />
</div>
</div>
<script>
window.onload = function() {
var img = document.getElementById('img');
var tracker = new tracking.ObjectTracker(['face', 'eye', 'mouth']);
tracker.setStepSize(1.7);
tracking.track('#img', tracker);
tracker.on('track', function(event) {
event.data.forEach(function(rect) {
window.plot(rect.x, rect.y, rect.width, rect.height);
});
});
window.plot = function(x, y, w, h) {
var rect = document.createElement('div');
document.querySelector('.demo-container').appendChild(rect);
rect.classList.add('rect');
rect.style.width = w + 'px';
rect.style.height = h + 'px';
rect.style.left = (img.offsetLeft + x) + 'px';
rect.style.top = (img.offsetTop + y) + 'px';
};
};
</script>
</body>
</html>

View File

@ -0,0 +1,123 @@
<!doctype html>
<html>
<head>
<meta charset="utf-8">
<title>tracking.js - tag friends</title>
<link rel="stylesheet" href="assets/demo.css">
<script src="../build/tracking-min.js"></script>
<script src="../build/data/face-min.js"></script>
<style>
#photo:hover .rect {
opacity: .75;
transition: opacity .75s ease-out;
}
.rect:hover * {
opacity: 1;
}
.rect {
border-radius: 2px;
border: 3px solid white;
box-shadow: 0 16px 28px 0 rgba(0, 0, 0, 0.3);
cursor: pointer;
left: -1000px;
opacity: 0;
position: absolute;
top: -1000px;
}
.arrow {
border-bottom: 10px solid white;
border-left: 10px solid transparent;
border-right: 10px solid transparent;
height: 0;
width: 0;
position: absolute;
left: 50%;
margin-left: -5px;
bottom: -12px;
opacity: 0;
}
input {
border: 0px;
bottom: -42px;
color: #a64ceb;
font-size: 15px;
height: 30px;
left: 50%;
margin-left: -90px;
opacity: 0;
outline: none;
position: absolute;
text-align: center;
width: 180px;
transition: opacity .35s ease-out;
}
#img {
position: absolute;
top: 50%;
left: 50%;
margin: -173px 0 0 -300px;
}
</style>
</head>
<body>
<div class="demo-title">
<p><a href="http://trackingjs.com" target="_parent">tracking.js</a> hover over the image to see all detected faces</p>
</div>
<div class="demo-frame">
<div class="demo-container">
<span id="photo"><img id="img" src="assets/faces.jpg" /></span>
</div>
</div>
<script>
window.onload = function() {
var img = document.getElementById('img');
var tracker = new tracking.ObjectTracker('face');
tracking.track(img, tracker);
tracker.on('track', function(event) {
event.data.forEach(function(rect) {
plotRectangle(rect.x, rect.y, rect.width, rect.height);
});
});
var friends = [ 'Thomas Middleditch', 'Martin Starr', 'Zach Woods' ];
var plotRectangle = function(x, y, w, h) {
var rect = document.createElement('div');
var arrow = document.createElement('div');
var input = document.createElement('input');
input.value = friends.pop();
rect.onclick = function name() {
input.select();
};
arrow.classList.add('arrow');
rect.classList.add('rect');
rect.appendChild(input);
rect.appendChild(arrow);
document.getElementById('photo').appendChild(rect);
rect.style.width = w + 'px';
rect.style.height = h + 'px';
rect.style.left = (img.offsetLeft + x) + 'px';
rect.style.top = (img.offsetTop + y) + 'px';
};
};
</script>
</body>
</html>

View File

@ -0,0 +1,73 @@
<!doctype html>
<html>
<head>
<meta charset="utf-8">
<title>tracking.js - feature detection</title>
<link rel="stylesheet" href="assets/demo.css">
<script src="../build/tracking-min.js"></script>
<script src="../node_modules/dat.gui/build/dat.gui.min.js"></script>
<style>
.demo-container {
background: #131112;
}
#image {
position: absolute;
left: -1000px;
top: -1000px;
}
#canvas {
position: absolute;
left: 50%;
top: 50%;
margin: -200px 0 0 -200px;
}
</style>
</head>
<body>
<div class="demo-title">
<p><a href="http://trackingjs.com" target="_parent">tracking.js</a> detects feature points on an image</p>
</div>
<div class="demo-frame">
<div class="demo-container">
<img id="image" src="assets/fast.png" />
<canvas id="canvas" width="400" height="400"></canvas>
</div>
</div>
<script>
window.onload = function() {
var width = 400;
var height = 400;
var canvas = document.getElementById('canvas');
var context = canvas.getContext('2d');
var image = document.getElementById('image');
window.fastThreshold = 10;
var doFindFeatures = function() {
tracking.Fast.THRESHOLD = window.fastThreshold;
context.drawImage(image, 0, 0, width, height);
var imageData = context.getImageData(0, 0, width, height);
var gray = tracking.Image.grayscale(imageData.data, width, height);
var corners = tracking.Fast.findCorners(gray, width, height);
for (var i = 0; i < corners.length; i += 2) {
context.fillStyle = '#f00';
context.fillRect(corners[i], corners[i + 1], 3, 3);
}
};
doFindFeatures();
var gui = new dat.GUI();
gui.add(window, 'fastThreshold', 0, 100).onChange(doFindFeatures);
}
</script>
</body>
</html>

View File

@ -0,0 +1,85 @@
<!doctype html>
<html>
<head>
<title>tracking.js - feature detector with camera</title>
<meta charset="utf-8">
<link rel="stylesheet" href="assets/demo.css">
<script src="../build/tracking-min.js"></script>
<script src="../node_modules/dat.gui/build/dat.gui.min.js"></script>
<script src="assets/stats.min.js"></script>
<style>
#video {
position: absolute;
top: -1000px;
}
#canvas {
left: 50%;
top: 50%;
margin-left: -200px;
margin-top: -150px;
position: absolute;
}
</style>
</head>
<body>
<div class="demo-title">
<p><a href="http://trackingjs.com" target="_parent">tracking.js</a> display detected features</p>
</div>
<div class="demo-frame">
<div class="demo-container">
<video id="video" width="400" height="300" preload autoplay loop muted></video>
<canvas id="canvas" width="400" height="300"></canvas>
</div>
</div>
<script>
var canvas = document.getElementById('canvas');
var context = canvas.getContext('2d');
var FastTracker = function() {
FastTracker.base(this, 'constructor');
};
tracking.inherits(FastTracker, tracking.Tracker);
tracking.Fast.THRESHOLD = 2;
FastTracker.prototype.threshold = tracking.Fast.THRESHOLD;
FastTracker.prototype.track = function(pixels, width, height) {
stats.begin();
var gray = tracking.Image.grayscale(pixels, width, height);
var corners = tracking.Fast.findCorners(gray, width, height);
stats.end();
this.emit('track', {
data: corners
});
};
var tracker = new FastTracker();
tracker.on('track', function(event) {
context.clearRect(0, 0, canvas.width, canvas.height);
var corners = event.data;
for (var i = 0; i < corners.length; i += 2) {
context.fillStyle = '#f00';
context.fillRect(corners[i], corners[i + 1], 2, 2);
}
});
tracking.track('#video', tracker, { camera: true });
// GUI Controllers
var gui = new dat.GUI();
gui.add(tracker, 'threshold', 1, 100).onChange(function(value) {
tracking.Fast.THRESHOLD = value;
});
</script>
</body>
</html>

View File

@ -0,0 +1,118 @@
'use strict';
var gulp = require('gulp');
var concat = require('gulp-concat');
var header = require('gulp-header');
var jsdoc = require('gulp-jsdoc');
var jshint = require('gulp-jshint');
var nodeunit = require('gulp-nodeunit');
var pkg = require('./package.json');
var rename = require('gulp-rename');
var rimraf = require('gulp-rimraf');
var stylish = require('jshint-stylish');
var uglify = require('gulp-uglify');
var esformatter = require('gulp-esformatter');
var runSequence = require('run-sequence');
gulp.task('all', ['clean'], function(cb) {
// run-sequence signals completion through the task callback rather than a return value
runSequence(['build', 'build-data'], cb);
});
gulp.task('clean', function() {
return gulp.src('build').pipe(rimraf());
});
gulp.task('build', function() {
var files = [
'src/tracking.js',
'src/utils/EventEmitter.js',
'src/utils/Canvas.js',
'src/utils/DisjointSet.js',
'src/utils/Image.js',
'src/detection/ViolaJones.js',
'src/features/Brief.js',
'src/features/Fast.js',
'src/math/Math.js',
'src/math/Matrix.js',
'src/pose/EPnP.js',
'src/trackers/Tracker.js',
'src/trackers/TrackerTask.js',
'src/trackers/ColorTracker.js',
'src/trackers/ObjectTracker.js',
'src/trackers/LandmarksTracker.js',
'src/alignment/Regressor.js',
'src/alignment/LBF.js'
];
return gulp.src(files)
.pipe(concat('tracking.js'))
.pipe(banner())
.pipe(gulp.dest('build'))
.pipe(uglify())
.pipe(rename({
suffix: '-min'
}))
.pipe(banner())
.pipe(gulp.dest('build'));
});
gulp.task('build-data', function() {
return gulp.src('src/detection/training/haar/**.js')
.pipe(banner())
.pipe(gulp.dest('build/data'))
.pipe(rename({
suffix: '-min'
}))
.pipe(uglify())
.pipe(banner())
.pipe(gulp.dest('build/data'));
});
gulp.task('docs', function() {
return gulp.src(['src/**/*.js', 'README.md'])
.pipe(jsdoc('docs'));
});
gulp.task('format', function() {
return gulp.src(['src/**/*.js', '!src/detection/training/**/*.js'])
.pipe(esformatter())
.pipe(gulp.dest('src'));
});
gulp.task('lint', function() {
return gulp.src('src/**/**.js')
.pipe(jshint())
.pipe(jshint.reporter(stylish));
});
gulp.task('test', function(cb) {
gulp.src('test/*.js')
.pipe(nodeunit())
.on('end', cb);
});
gulp.task('test-watch', function() {
return gulp.watch(['src/**/*.js', 'test/**/*.js'], ['test']);
});
gulp.task('watch', function() {
gulp.watch('src/**/*.js', ['build']);
gulp.watch('src/data/*.js', ['build-data']);
});
// Private helpers
// ===============
function banner() {
var stamp = [
'/**',
' * <%= pkg.name %> - <%= pkg.description %>',
' * @author <%= pkg.author.name %> <<%= pkg.author.email %>>',
' * @version v<%= pkg.version %>',
' * @link <%= pkg.homepage %>',
' * @license <%= pkg.license %>',
' */',
''
].join('\n');
return header(stamp, { pkg: pkg });
}

View File

@ -0,0 +1,45 @@
{
"name": "tracking",
"version": "1.1.3",
"main": "build/tracking.js",
"description": "A modern approach for Computer Vision on the web.",
"homepage": "http://trackingjs.com",
"keywords": [
"tracking",
"trackingjs",
"webrtc"
],
"author": {
"name": "Eduardo Lundgren",
"email": "edu@rdo.io",
"web": "http://eduardo.io",
"twitter": "eduardolundgren"
},
"repository": {
"type": "git",
"url": "git@github.com:eduardolundgren/tracking.js.git"
},
"scripts": {
"test": "gulp test",
"build": "gulp build"
},
"license": "BSD",
"devDependencies": {
"gulp": "^3.9.0",
"gulp-concat": "^2.6.0",
"gulp-esformatter": "^5.0.0",
"gulp-header": "^1.7.1",
"gulp-jsdoc": "^0.1.5",
"gulp-jshint": "^2.0.0",
"gulp-nodeunit": "0.0.5",
"gulp-rename": "^1.2.2",
"gulp-rimraf": "^0.2.0",
"gulp-uglify": "^1.5.1",
"jshint": "^2.8.0",
"jshint-stylish": "^2.1.0",
"nodeunit": "^0.9.1",
"png-js": "^0.1.1",
"run-sequence": "^1.1.5",
"dat.gui": "^0.6.1"
}
}

View File

@ -0,0 +1,222 @@
(function() {
/**
* Face Alignment via Regressing Local Binary Features (LBF)
* This approach has two components: a set of local binary features and
* a locality principle for learning those features.
* The locality principle is used to guide the learning of a set of highly
* discriminative local binary features for each landmark independently.
* The obtained local binary features are used to learn a linear regression
* that later will be used to guide the landmarks in the alignment phase.
*
* @authors: VoxarLabs Team (http://cin.ufpe.br/~voxarlabs)
* Lucas Figueiredo <lsf@cin.ufpe.br>, Thiago Menezes <tmc2@cin.ufpe.br>,
* Thiago Domingues <tald@cin.ufpe.br>, Rafael Roberto <rar3@cin.ufpe.br>,
* Thulio Araujo <tlsa@cin.ufpe.br>, Joao Victor <jvfl@cin.ufpe.br>,
* Tomer Simis <tls@cin.ufpe.br>
*/
/**
* Holds the maximum number of stages that will be used in the alignment algorithm.
* Each stage contains a different set of random forests and retrieves the binary
* code from a more "specialized" (i.e. smaller) region around the landmarks.
* @type {number}
* @static
*/
tracking.LBF.maxNumStages = 4;
/**
* Holds the regressor that will be responsible for extracting the local features from
* the image and guide the landmarks using the training data.
* @type {object}
* @protected
* @static
*/
tracking.LBF.regressor_ = null;
/**
* Generates a set of landmarks for a set of faces
* @param {pixels} pixels The pixels in a linear [r,g,b,a,...] array.
* @param {number} width The image width.
* @param {number} height The image height.
* @param {array} faces The list of faces detected in the image
* @return {array} The aligned landmarks, each set of landmarks corresponding
* to a specific face.
* @static
*/
tracking.LBF.align = function(pixels, width, height, faces){
if(tracking.LBF.regressor_ == null){
tracking.LBF.regressor_ = new tracking.LBF.Regressor(
tracking.LBF.maxNumStages
);
}
// NOTE: is this thresholding suitable? if it is applied to the whole image, why no skin-color filter? and why not an adaptive threshold?
pixels = tracking.Image.grayscale(pixels, width, height, false);
pixels = tracking.Image.equalizeHist(pixels, width, height);
var shapes = new Array(faces.length);
for(var i in faces){
faces[i].height = faces[i].width;
var boundingBox = {};
boundingBox.startX = faces[i].x;
boundingBox.startY = faces[i].y;
boundingBox.width = faces[i].width;
boundingBox.height = faces[i].height;
shapes[i] = tracking.LBF.regressor_.predict(pixels, width, height, boundingBox);
}
return shapes;
}
/**
* Unprojects the landmarks shape from the bounding box.
* @param {matrix} shape The landmarks shape.
* @param {matrix} boundingBox The bounding box.
* @return {matrix} The landmarks shape projected into the bounding box.
* @static
* @protected
*/
tracking.LBF.unprojectShapeToBoundingBox_ = function(shape, boundingBox){
var temp = new Array(shape.length);
for(var i=0; i < shape.length; i++){
temp[i] = [
(shape[i][0] - boundingBox.startX) / boundingBox.width,
(shape[i][1] - boundingBox.startY) / boundingBox.height
];
}
return temp;
}
/**
* Projects the landmarks shape into the bounding box. The landmarks shape has
* normalized coordinates, so it is necessary to map these coordinates into
* the bounding box coordinates.
* @param {matrix} shape The landmarks shape.
* @param {matrix} boundingBox The bounding box.
* @return {matrix} The landmarks shape.
* @static
* @protected
*/
tracking.LBF.projectShapeToBoundingBox_ = function(shape, boundingBox){
var temp = new Array(shape.length);
for(var i=0; i < shape.length; i++){
temp[i] = [
shape[i][0] * boundingBox.width + boundingBox.startX,
shape[i][1] * boundingBox.height + boundingBox.startY
];
}
return temp;
}
/**
* Calculates the rotation and scale necessary to transform shape1 into shape2.
* @param {matrix} shape1 The shape to be transformed.
* @param {matrix} shape2 The shape to be transformed in.
* @return {[matrix, scalar]} The rotation matrix and scale that applied to shape1
* results in shape2.
* @static
* @protected
*/
tracking.LBF.similarityTransform_ = function(shape1, shape2){
var center1 = [0,0];
var center2 = [0,0];
for (var i = 0; i < shape1.length; i++) {
center1[0] += shape1[i][0];
center1[1] += shape1[i][1];
center2[0] += shape2[i][0];
center2[1] += shape2[i][1];
}
center1[0] /= shape1.length;
center1[1] /= shape1.length;
center2[0] /= shape2.length;
center2[1] /= shape2.length;
var temp1 = tracking.Matrix.clone(shape1);
var temp2 = tracking.Matrix.clone(shape2);
for(var i=0; i < shape1.length; i++){
temp1[i][0] -= center1[0];
temp1[i][1] -= center1[1];
temp2[i][0] -= center2[0];
temp2[i][1] -= center2[1];
}
var covariance1, covariance2;
var mean1, mean2;
var t = tracking.Matrix.calcCovarMatrix(temp1);
covariance1 = t[0];
mean1 = t[1];
t = tracking.Matrix.calcCovarMatrix(temp2);
covariance2 = t[0];
mean2 = t[1];
var s1 = Math.sqrt(tracking.Matrix.norm(covariance1));
var s2 = Math.sqrt(tracking.Matrix.norm(covariance2));
var scale = s1/s2;
temp1 = tracking.Matrix.mulScalar(1.0/s1, temp1);
temp2 = tracking.Matrix.mulScalar(1.0/s2, temp2);
var num = 0, den = 0;
for (var i = 0; i < shape1.length; i++) {
num = num + temp1[i][1] * temp2[i][0] - temp1[i][0] * temp2[i][1];
den = den + temp1[i][0] * temp2[i][0] + temp1[i][1] * temp2[i][1];
}
var norm = Math.sqrt(num*num + den*den);
var sin_theta = num/norm;
var cos_theta = den/norm;
var rotation = [
[cos_theta, -sin_theta],
[sin_theta, cos_theta]
];
return [rotation, scale];
}
/**
* LBF Random Forest data structure.
* @static
* @constructor
*/
tracking.LBF.RandomForest = function(forestIndex){
this.maxNumTrees = tracking.LBF.RegressorData[forestIndex].max_numtrees;
this.landmarkNum = tracking.LBF.RegressorData[forestIndex].num_landmark;
this.maxDepth = tracking.LBF.RegressorData[forestIndex].max_depth;
this.stages = tracking.LBF.RegressorData[forestIndex].stages;
this.rfs = new Array(this.landmarkNum);
for(var i=0; i < this.landmarkNum; i++){
this.rfs[i] = new Array(this.maxNumTrees);
for(var j=0; j < this.maxNumTrees; j++){
this.rfs[i][j] = new tracking.LBF.Tree(forestIndex, i, j);
}
}
}
/**
* LBF Tree data structure.
* @static
* @constructor
*/
tracking.LBF.Tree = function(forestIndex, landmarkIndex, treeIndex){
var data = tracking.LBF.RegressorData[forestIndex].landmarks[landmarkIndex][treeIndex];
this.maxDepth = data.max_depth;
this.maxNumNodes = data.max_numnodes;
this.nodes = data.nodes;
this.landmarkID = data.landmark_id;
this.numLeafnodes = data.num_leafnodes;
this.numNodes = data.num_nodes;
this.maxNumFeats = data.max_numfeats;
this.maxRadioRadius = data.max_radio_radius;
this.leafnodes = data.id_leafnodes;
}
}());
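
For reference, a minimal sketch of driving tracking.LBF.align directly, outside of LandmarksTracker. It assumes build/data/face-min.js has registered the 'face' classifier and that the Regressor/Landmarks training data are loaded; `imageData`, `width` and `height` would come from a canvas 2D context, and the detector parameters are illustrative:

// Detect faces first; align then fits landmarks inside each rectangle.
var faces = tracking.ViolaJones.detect(
  imageData.data, width, height,
  2,    // initialScale
  1.25, // scaleFactor
  1.5,  // stepSize
  0.1,  // edgesDensity
  tracking.ViolaJones.classifiers.face);
// One array of [x, y] landmark pairs per detected face.
var shapes = tracking.LBF.align(imageData.data, width, height, faces);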

View File

@ -0,0 +1,230 @@
(function() {
tracking.LBF = {};
/**
* LBF Regressor utility.
* @constructor
*/
tracking.LBF.Regressor = function(maxNumStages){
this.maxNumStages = maxNumStages;
this.rfs = new Array(maxNumStages);
this.models = new Array(maxNumStages);
for(var i=0; i < maxNumStages; i++){
this.rfs[i] = new tracking.LBF.RandomForest(i);
this.models[i] = tracking.LBF.RegressorData[i].models;
}
this.meanShape = tracking.LBF.LandmarksData;
}
/**
* Predicts the position of the landmarks based on the bounding box of the face.
* @param {pixels} pixels The grayscale pixels in a linear array.
* @param {number} width Width of the image.
* @param {number} height Height of the image.
* @param {object} boundingBox Bounding box of the face to be aligned.
* @return {matrix} A matrix with each landmark position in a row [x,y].
*/
tracking.LBF.Regressor.prototype.predict = function(pixels, width, height, boundingBox) {
var images = [];
var currentShapes = [];
var boundingBoxes = [];
var meanShapeClone = tracking.Matrix.clone(this.meanShape);
images.push({
'data': pixels,
'width': width,
'height': height
});
boundingBoxes.push(boundingBox);
currentShapes.push(tracking.LBF.projectShapeToBoundingBox_(meanShapeClone, boundingBox));
for(var stage = 0; stage < this.maxNumStages; stage++){
var binaryFeatures = tracking.LBF.Regressor.deriveBinaryFeat(this.rfs[stage], images, currentShapes, boundingBoxes, meanShapeClone);
this.applyGlobalPrediction(binaryFeatures, this.models[stage], currentShapes, boundingBoxes);
}
return currentShapes[0];
};
/**
* Multiplies the binary features of the landmarks with the regression matrix
* to obtain the displacement for each landmark. Then applies this displacement
* into the landmarks shape.
* @param {object} binaryFeatures The binary features for the landmarks.
* @param {object} models The regressor models.
* @param {matrix} currentShapes The landmarks shapes.
* @param {array} boundingBoxes The bounding boxes of the faces.
*/
tracking.LBF.Regressor.prototype.applyGlobalPrediction = function(binaryFeatures, models, currentShapes,
boundingBoxes){
var residual = currentShapes[0].length * 2;
var deltashape = new Array(residual/2);
for(var i=0; i < residual/2; i++){
deltashape[i] = [0.0, 0.0];
}
for(var i=0; i < currentShapes.length; i++){
for(var j=0; j < residual; j++){
var tmp = 0;
for(var lx=0, idx=0; (idx = binaryFeatures[i][lx].index) != -1; lx++){
if(idx <= models[j].nr_feature){
tmp += models[j].data[(idx - 1)] * binaryFeatures[i][lx].value;
}
}
if(j < residual/2){
deltashape[j][0] = tmp;
}else{
deltashape[j - residual/2][1] = tmp;
}
}
var res = tracking.LBF.similarityTransform_(tracking.LBF.unprojectShapeToBoundingBox_(currentShapes[i], boundingBoxes[i]), this.meanShape);
// NOTE: the similarity transform is computed here, but its rotation is not
// currently applied to deltashape before the shape update below.
var rotation = tracking.Matrix.transpose(res[0]);
var s = tracking.LBF.unprojectShapeToBoundingBox_(currentShapes[i], boundingBoxes[i]);
s = tracking.Matrix.add(s, deltashape);
currentShapes[i] = tracking.LBF.projectShapeToBoundingBox_(s, boundingBoxes[i]);
}
};
/**
* Derives the binary features from the image for each landmark.
* @param {object} forest The random forest to search for the best binary feature match.
* @param {array} images The images with pixels in a grayscale linear array.
* @param {array} currentShapes The current landmarks shape.
* @param {array} boundingBoxes The bounding boxes of the faces.
* @param {matrix} meanShape The mean shape of the current landmarks set.
* @return {array} The binary features extracted from the image and matched with the
* training data.
* @static
*/
tracking.LBF.Regressor.deriveBinaryFeat = function(forest, images, currentShapes, boundingBoxes, meanShape){
var binaryFeatures = new Array(images.length);
for(var i=0; i < images.length; i++){
var t = forest.maxNumTrees * forest.landmarkNum + 1;
binaryFeatures[i] = new Array(t);
for(var j=0; j < t; j++){
binaryFeatures[i][j] = {};
}
}
var leafnodesPerTree = 1 << (forest.maxDepth - 1);
for(var i=0; i < images.length; i++){
var projectedShape = tracking.LBF.unprojectShapeToBoundingBox_(currentShapes[i], boundingBoxes[i]);
var transform = tracking.LBF.similarityTransform_(projectedShape, meanShape);
for(var j=0; j < forest.landmarkNum; j++){
for(var k=0; k < forest.maxNumTrees; k++){
var binaryCode = tracking.LBF.Regressor.getCodeFromTree(forest.rfs[j][k], images[i],
currentShapes[i], boundingBoxes[i], transform[0], transform[1]);
var index = j*forest.maxNumTrees + k;
binaryFeatures[i][index].index = leafnodesPerTree * index + binaryCode;
binaryFeatures[i][index].value = 1;
}
}
binaryFeatures[i][forest.landmarkNum * forest.maxNumTrees].index = -1;
binaryFeatures[i][forest.landmarkNum * forest.maxNumTrees].value = -1;
}
return binaryFeatures;
}
/**
* Gets the binary code for a specific tree in a random forest. For each landmark,
* the position from two pre-defined points are recovered from the training data
* and then the intensity of the pixels corresponding to these points is extracted
* from the image and used to traverse the trees in the random forest. At the end,
* the ending nodes will be represented by 1, and the remaining nodes by 0.
*
* +--------------------------- Random Forest -----------------------------+
* | Ø = Ending leaf |
* | |
* | O O O O O |
* | / \ / \ / \ / \ / \ |
* | O O O O O O O O O O |
* | / \ / \ / \ / \ / \ / \ / \ / \ / \ / \ |
* | Ø O O O O O Ø O O Ø O O O O Ø O O O O Ø |
* | 1 0 0 0 0 0 1 0 0 1 0 0 0 0 1 0 0 0 0 1 |
* +-----------------------------------------------------------------------+
* Final binary code for this landmark: 10000010010000100001
*
* @param {object} tree The tree to be analyzed.
* @param {array} image The image with pixels in a grayscale linear array.
* @param {matrix} shape The current landmarks shape.
* @param {object} boundingBox The bounding box of the face.
* @param {matrix} rotation The rotation matrix used to transform the projected landmarks
* into the mean shape.
* @param {number} scale The scale factor used to transform the projected landmarks
* into the mean shape.
* @return {number} The binary code extracted from the tree.
* @static
*/
tracking.LBF.Regressor.getCodeFromTree = function(tree, image, shape, boundingBox, rotation, scale){
var current = 0;
var bincode = 0;
while(true){
var x1 = Math.cos(tree.nodes[current].feats[0]) * tree.nodes[current].feats[2] * tree.maxRadioRadius * boundingBox.width;
var y1 = Math.sin(tree.nodes[current].feats[0]) * tree.nodes[current].feats[2] * tree.maxRadioRadius * boundingBox.height;
var x2 = Math.cos(tree.nodes[current].feats[1]) * tree.nodes[current].feats[3] * tree.maxRadioRadius * boundingBox.width;
var y2 = Math.sin(tree.nodes[current].feats[1]) * tree.nodes[current].feats[3] * tree.maxRadioRadius * boundingBox.height;
var project_x1 = rotation[0][0] * x1 + rotation[0][1] * y1;
var project_y1 = rotation[1][0] * x1 + rotation[1][1] * y1;
var real_x1 = Math.floor(project_x1 + shape[tree.landmarkID][0]);
var real_y1 = Math.floor(project_y1 + shape[tree.landmarkID][1]);
// clamp x to the image width and y to the image height (pixels are indexed as data[y * width + x])
real_x1 = Math.max(0.0, Math.min(real_x1, image.width - 1.0));
real_y1 = Math.max(0.0, Math.min(real_y1, image.height - 1.0));
var project_x2 = rotation[0][0] * x2 + rotation[0][1] * y2;
var project_y2 = rotation[1][0] * x2 + rotation[1][1] * y2;
var real_x2 = Math.floor(project_x2 + shape[tree.landmarkID][0]);
var real_y2 = Math.floor(project_y2 + shape[tree.landmarkID][1]);
real_x2 = Math.max(0.0, Math.min(real_x2, image.width - 1.0));
real_y2 = Math.max(0.0, Math.min(real_y2, image.height - 1.0));
var pdf = Math.floor(image.data[real_y1*image.width + real_x1]) -
Math.floor(image.data[real_y2 * image.width +real_x2]);
if(pdf < tree.nodes[current].thresh){
current = tree.nodes[current].cnodes[0];
}else{
current = tree.nodes[current].cnodes[1];
}
if (tree.nodes[current].is_leafnode == 1) {
bincode = 1;
for (var i=0; i < tree.leafnodes.length; i++) {
if (tree.leafnodes[i] == current) {
return bincode;
}
bincode++;
}
return bincode;
}
}
return bincode;
}
}());
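
A note on the feature layout used above: deriveBinaryFeat produces, per image, a sparse vector of {index, value} pairs terminated by an {index: -1} sentinel, which is what applyGlobalPrediction's inner loop scans for. A tiny illustration of the indexing (numbers hypothetical):

// With maxDepth = 5, each tree has 1 << (5 - 1) = 16 leaf nodes.
// For landmark j = 2, tree k = 3 in a forest with maxNumTrees = 5:
var treePosition = 2 * 5 + 3;         // 13th tree overall
var leafnodesPerTree = 1 << (5 - 1);  // 16
// binaryCode in [1, 16] identifies which leaf the sample reached:
var featureIndex = leafnodesPerTree * treePosition + binaryCode;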

View File

@ -0,0 +1 @@
tracking.LBF.LandmarksData=[[.0125684,.210687],[.0357801,.468239],[.115541,.712995],[.284357,.905924],[.514999,.996969],[.742149,.899084],[.908075,.698924],[.976219,.446131],[.985153,.186042],[.102259,.0993176],[.244642,.0382795],[.405298,.0817395],[.574051,.0755624],[.736492,.0254858],[.879617,.0749889],[.494662,.182299],[.409008,.498333],[.502223,.532951],[.594788,.495907],[.201686,.196578],[.249652,.163191],[.313234,.163275],[.363542,.203391],[.628143,.197082],[.675643,.154603],[.738182,.151877],[.787497,.18301],[.321899,.669231],[.502462,.632468],[.692712,.659599],[.50761,.764184]];
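
The single line above is the mean face shape that seeds the regressor: 31 landmark coordinates normalized to the unit square, so [.0125684, .210687] is the first jaw point at roughly (1%, 21%) of the face bounding box. tracking.LBF.projectShapeToBoundingBox_ maps these normalized pairs into pixel space before the first alignment stage.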

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,280 @@
(function() {
/**
* ViolaJones utility.
* @static
* @constructor
*/
tracking.ViolaJones = {};
/**
* Holds the minimum area of intersection that defines when a rectangle is
* from the same group. Often when a face is matched, multiple rectangles are
* classified as possible rectangles to represent the face; when they
* intersect they are grouped as one face.
* @type {number}
* @default 0.5
* @static
*/
tracking.ViolaJones.REGIONS_OVERLAP = 0.5;
/**
* Holds the HAAR cascade classifiers converted from OpenCV training.
* @type {array}
* @static
*/
tracking.ViolaJones.classifiers = {};
/**
* Detects through the HAAR cascade data rectangles matches.
* @param {pixels} pixels The pixels in a linear [r,g,b,a,...] array.
* @param {number} width The image width.
* @param {number} height The image height.
* @param {number} initialScale The initial scale to start the block
* scaling.
* @param {number} scaleFactor The scale factor to scale the feature block.
* @param {number} stepSize The block step size.
* @param {number} edgesDensity Percentage density of edges inside the
* classifier block. Value from [0.0, 1.0], defaults to 0.2. If specified,
* edge detection is applied to the image to prune dead areas, which can
* significantly improve performance.
* @param {array} data The HAAR cascade data.
* @return {array} Found rectangles.
* @static
*/
tracking.ViolaJones.detect = function(pixels, width, height, initialScale, scaleFactor, stepSize, edgesDensity, data) {
var total = 0;
var rects = [];
var integralImage = new Int32Array(width * height);
var integralImageSquare = new Int32Array(width * height);
var tiltedIntegralImage = new Int32Array(width * height);
var integralImageSobel;
if (edgesDensity > 0) {
integralImageSobel = new Int32Array(width * height);
}
tracking.Image.computeIntegralImage(pixels, width, height, integralImage, integralImageSquare, tiltedIntegralImage, integralImageSobel);
var minWidth = data[0];
var minHeight = data[1];
var scale = initialScale * scaleFactor;
var blockWidth = (scale * minWidth) | 0;
var blockHeight = (scale * minHeight) | 0;
while (blockWidth < width && blockHeight < height) {
var step = (scale * stepSize + 0.5) | 0;
for (var i = 0; i < (height - blockHeight); i += step) {
for (var j = 0; j < (width - blockWidth); j += step) {
if (edgesDensity > 0) {
if (this.isTriviallyExcluded(edgesDensity, integralImageSobel, i, j, width, blockWidth, blockHeight)) {
continue;
}
}
if (this.evalStages_(data, integralImage, integralImageSquare, tiltedIntegralImage, i, j, width, blockWidth, blockHeight, scale)) {
rects[total++] = {
width: blockWidth,
height: blockHeight,
x: j,
y: i
};
}
}
}
scale *= scaleFactor;
blockWidth = (scale * minWidth) | 0;
blockHeight = (scale * minHeight) | 0;
}
return this.mergeRectangles_(rects);
};
/**
* Fast check to test whether the edges density inside the block is greater
* than a threshold; only then are the stages tested. This can significantly
* improve performance.
* @param {number} edgesDensity Percentage density edges inside the
* classifier block.
* @param {array} integralImageSobel The integral image of a sobel image.
* @param {number} i Vertical position of the pixel to be evaluated.
* @param {number} j Horizontal position of the pixel to be evaluated.
* @param {number} width The image width.
* @param {number} blockWidth The block width.
* @param {number} blockHeight The block height.
* @return {boolean} True if the block at position i,j can be skipped,
* false otherwise.
* @static
* @protected
*/
tracking.ViolaJones.isTriviallyExcluded = function(edgesDensity, integralImageSobel, i, j, width, blockWidth, blockHeight) {
var wbA = i * width + j;
var wbB = wbA + blockWidth;
var wbD = wbA + blockHeight * width;
var wbC = wbD + blockWidth;
var blockEdgesDensity = (integralImageSobel[wbA] - integralImageSobel[wbB] - integralImageSobel[wbD] + integralImageSobel[wbC]) / (blockWidth * blockHeight * 255);
if (blockEdgesDensity < edgesDensity) {
return true;
}
return false;
};
/**
* Evaluates if the block size on i,j position is a valid HAAR cascade
* stage.
* @param {array} data The HAAR cascade data.
* @param {number} i Vertical position of the pixel to be evaluated.
* @param {number} j Horizontal position of the pixel to be evaluated.
* @param {number} width The image width.
* @param {number} blockWidth The block width.
* @param {number} blockHeight The block height.
* @param {number} scale The scale factor between the block size and its
* original size.
* @return {boolean} Whether the region passes all the stage tests.
* @private
* @static
*/
tracking.ViolaJones.evalStages_ = function(data, integralImage, integralImageSquare, tiltedIntegralImage, i, j, width, blockWidth, blockHeight, scale) {
var inverseArea = 1.0 / (blockWidth * blockHeight);
var wbA = i * width + j;
var wbB = wbA + blockWidth;
var wbD = wbA + blockHeight * width;
var wbC = wbD + blockWidth;
var mean = (integralImage[wbA] - integralImage[wbB] - integralImage[wbD] + integralImage[wbC]) * inverseArea;
var variance = (integralImageSquare[wbA] - integralImageSquare[wbB] - integralImageSquare[wbD] + integralImageSquare[wbC]) * inverseArea - mean * mean;
var standardDeviation = 1;
if (variance > 0) {
standardDeviation = Math.sqrt(variance);
}
var length = data.length;
for (var w = 2; w < length; ) {
var stageSum = 0;
var stageThreshold = data[w++];
var nodeLength = data[w++];
while (nodeLength--) {
var rectsSum = 0;
var tilted = data[w++];
var rectsLength = data[w++];
for (var r = 0; r < rectsLength; r++) {
var rectLeft = (j + data[w++] * scale + 0.5) | 0;
var rectTop = (i + data[w++] * scale + 0.5) | 0;
var rectWidth = (data[w++] * scale + 0.5) | 0;
var rectHeight = (data[w++] * scale + 0.5) | 0;
var rectWeight = data[w++];
var w1;
var w2;
var w3;
var w4;
if (tilted) {
// RectSum(r) = RSAT(x-h+w, y+w+h-1) + RSAT(x, y-1) - RSAT(x-h, y+h-1) - RSAT(x+w, y+w-1)
w1 = (rectLeft - rectHeight + rectWidth) + (rectTop + rectWidth + rectHeight - 1) * width;
w2 = rectLeft + (rectTop - 1) * width;
w3 = (rectLeft - rectHeight) + (rectTop + rectHeight - 1) * width;
w4 = (rectLeft + rectWidth) + (rectTop + rectWidth - 1) * width;
rectsSum += (tiltedIntegralImage[w1] + tiltedIntegralImage[w2] - tiltedIntegralImage[w3] - tiltedIntegralImage[w4]) * rectWeight;
} else {
// RectSum(r) = SAT(x-1, y-1) + SAT(x+w-1, y+h-1) - SAT(x-1, y+h-1) - SAT(x+w-1, y-1)
w1 = rectTop * width + rectLeft;
w2 = w1 + rectWidth;
w3 = w1 + rectHeight * width;
w4 = w3 + rectWidth;
rectsSum += (integralImage[w1] - integralImage[w2] - integralImage[w3] + integralImage[w4]) * rectWeight;
// TODO: Review the code below to analyze performance when using it instead.
// w1 = (rectLeft - 1) + (rectTop - 1) * width;
// w2 = (rectLeft + rectWidth - 1) + (rectTop + rectHeight - 1) * width;
// w3 = (rectLeft - 1) + (rectTop + rectHeight - 1) * width;
// w4 = (rectLeft + rectWidth - 1) + (rectTop - 1) * width;
// rectsSum += (integralImage[w1] + integralImage[w2] - integralImage[w3] - integralImage[w4]) * rectWeight;
}
}
var nodeThreshold = data[w++];
var nodeLeft = data[w++];
var nodeRight = data[w++];
if (rectsSum * inverseArea < nodeThreshold * standardDeviation) {
stageSum += nodeLeft;
} else {
stageSum += nodeRight;
}
}
if (stageSum < stageThreshold) {
return false;
}
}
return true;
};
/**
* Postprocess the detected sub-windows in order to combine overlapping
* detections into a single detection.
* @param {array} rects
* @return {array}
* @private
* @static
*/
tracking.ViolaJones.mergeRectangles_ = function(rects) {
var disjointSet = new tracking.DisjointSet(rects.length);
for (var i = 0; i < rects.length; i++) {
var r1 = rects[i];
for (var j = 0; j < rects.length; j++) {
var r2 = rects[j];
if (tracking.Math.intersectRect(r1.x, r1.y, r1.x + r1.width, r1.y + r1.height, r2.x, r2.y, r2.x + r2.width, r2.y + r2.height)) {
var x1 = Math.max(r1.x, r2.x);
var y1 = Math.max(r1.y, r2.y);
var x2 = Math.min(r1.x + r1.width, r2.x + r2.width);
var y2 = Math.min(r1.y + r1.height, r2.y + r2.height);
var overlap = (x1 - x2) * (y1 - y2);
var area1 = (r1.width * r1.height);
var area2 = (r2.width * r2.height);
if ((overlap / (area1 * (area1 / area2)) >= this.REGIONS_OVERLAP) &&
(overlap / (area2 * (area1 / area2)) >= this.REGIONS_OVERLAP)) {
disjointSet.union(i, j);
}
}
}
}
var map = {};
for (var k = 0; k < disjointSet.length; k++) {
var rep = disjointSet.find(k);
if (!map[rep]) {
map[rep] = {
total: 1,
width: rects[k].width,
height: rects[k].height,
x: rects[k].x,
y: rects[k].y
};
continue;
}
map[rep].total++;
map[rep].width += rects[k].width;
map[rep].height += rects[k].height;
map[rep].x += rects[k].x;
map[rep].y += rects[k].y;
}
var result = [];
Object.keys(map).forEach(function(key) {
var rect = map[key];
result.push({
total: rect.total,
width: (rect.width / rect.total + 0.5) | 0,
height: (rect.height / rect.total + 0.5) | 0,
x: (rect.x / rect.total + 0.5) | 0,
y: (rect.y / rect.total + 0.5) | 0
});
});
return result;
};
}());
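
The wbA/wbB/wbC/wbD arithmetic above is the standard integral image trick: the sum over any axis-aligned rectangle is recovered from four table lookups. A self-contained sketch of the idea (plain JavaScript, independent of the library; `sat[y * width + x]` is assumed to hold the inclusive cumulative sum):

function rectSum(sat, width, x, y, w, h) {
  var a = (y - 1) * width + (x - 1); // corner just outside the top-left
  var b = a + w;                     // top-right
  var c = a + h * width;             // bottom-left
  var d = c + w;                     // bottom-right, inside the rectangle
  // Guard the top/left borders where the "outside" corners do not exist.
  var A = (x > 0 && y > 0) ? sat[a] : 0;
  var B = (y > 0) ? sat[b] : 0;
  var C = (x > 0) ? sat[c] : 0;
  return sat[d] - B - C + A;
}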

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,198 @@
(function() {
/**
* Brief stands for "Binary Robust Independent Elementary Features". This
* method generates a binary string for each keypoint found by an extractor
* method.
* @static
* @constructor
*/
tracking.Brief = {};
/**
* The set of binary tests is defined by the nd (x,y)-location pairs
* uniquely chosen during the initialization. Typical values are N =
* 128, 256 or 512; N = 128 yields a good compromise between speed, storage
* efficiency, and recognition rate.
* @type {number}
*/
tracking.Brief.N = 512;
/**
* Caches coordinates values of (x,y)-location pairs uniquely chosen during
* the initialization.
* @type {Object.<number, Int32Array>}
* @private
* @static
*/
tracking.Brief.randomImageOffsets_ = {};
/**
* Caches delta values of (x,y)-location pairs uniquely chosen during
* the initialization.
* @type {Int32Array}
* @private
* @static
*/
tracking.Brief.randomWindowOffsets_ = null;
/**
* Generates a binary string for each found keypoints extracted using an
* extractor method.
* @param {array} pixels The grayscale pixels in a linear [p1,p2,...] array.
* @param {number} width The image width.
* @param {array} keypoints
* @return {Int32Array} Returns an array where each run of N/32 sequential
* int values represents the descriptor binary string (N bits) necessary
* to describe the corner, e.g. [0,0,0,0, 0,0,0,0, ...] for N = 128.
* @static
*/
tracking.Brief.getDescriptors = function(pixels, width, keypoints) {
// Optimizing divide by 32 operation using binary shift
// (this.N >> 5) === this.N/32.
var descriptors = new Int32Array((keypoints.length >> 1) * (this.N >> 5));
var descriptorWord = 0;
var offsets = this.getRandomOffsets_(width);
var position = 0;
for (var i = 0; i < keypoints.length; i += 2) {
var w = width * keypoints[i + 1] + keypoints[i];
var offsetsPosition = 0;
for (var j = 0, n = this.N; j < n; j++) {
if (pixels[offsets[offsetsPosition++] + w] < pixels[offsets[offsetsPosition++] + w]) {
// The bit in the position `j % 32` of descriptorWord should be set to 1. We do
// this by making an OR operation with a binary number that only has the bit
// in that position set to 1. That binary number is obtained by shifting 1 left by
// `j % 32` (which is the same as `j & 31`) positions.
descriptorWord |= 1 << (j & 31);
}
// If the next j is a multiple of 32, we will need to use a new descriptor word to hold
// the next results.
if (!((j + 1) & 31)) {
descriptors[position++] = descriptorWord;
descriptorWord = 0;
}
}
}
return descriptors;
};
/**
* Matches sets of features {mi} and {mj} extracted from two images taken
* from similar, and often successive, viewpoints. A classical procedure
* runs as follows. For each point {mi} in the first image, search in a
* region of the second image around location {mi} for point {mj}. The
* search is based on the similarity of the local image windows, also known
* as kernel windows, centered on the points, which strongly characterizes
* the points when the images are sufficiently close. Once each keypoint is
* described with its binary string, they need to be compared with the
* closest matching point. The distance metric is critical to matching
* performance. Using binary strings reduces the size of the descriptor and
* provides a data structure that is fast to operate on and whose
* similarity can be measured by the Hamming distance.
* @param {array} keypoints1
* @param {array} descriptors1
* @param {array} keypoints2
* @param {array} descriptors2
* @return {array} Returns an array where the index is the corner1
* index coordinate, and the value is the corresponding match index of
* corner2, e.g. keypoints1=[x0,y0,x1,y1,...] and
* keypoints2=[x'0,y'0,x'1,y'1,...], if x0 matches x'1 and x1 matches x'0,
* the return array would be [3,0].
* @static
*/
tracking.Brief.match = function(keypoints1, descriptors1, keypoints2, descriptors2) {
var len1 = keypoints1.length >> 1;
var len2 = keypoints2.length >> 1;
var matches = new Array(len1);
for (var i = 0; i < len1; i++) {
var min = Infinity;
var minj = 0;
for (var j = 0; j < len2; j++) {
var dist = 0;
// Optimizing divide by 32 operation using binary shift
// (this.N >> 5) === this.N/32.
for (var k = 0, n = this.N >> 5; k < n; k++) {
dist += tracking.Math.hammingWeight(descriptors1[i * n + k] ^ descriptors2[j * n + k]);
}
if (dist < min) {
min = dist;
minj = j;
}
}
matches[i] = {
index1: i,
index2: minj,
keypoint1: [keypoints1[2 * i], keypoints1[2 * i + 1]],
keypoint2: [keypoints2[2 * minj], keypoints2[2 * minj + 1]],
confidence: 1 - min / this.N
};
}
return matches;
};
/**
* Removes matches outliers by testing matches on both directions.
* @param {array} keypoints1
* @param {array} descriptors1
* @param {array} keypoints2
* @param {array} descriptors2
* @return {array} Returns an array where the index is the corner1
* index coordinate, and the value is the corresponding match index of
* corner2, e.g. keypoints1=[x0,y0,x1,y1,...] and
* keypoints2=[x'0,y'0,x'1,y'1,...], if x0 matches x'1 and x1 matches x'0,
* the return array would be [3,0].
* @static
*/
tracking.Brief.reciprocalMatch = function(keypoints1, descriptors1, keypoints2, descriptors2) {
var matches = [];
if (keypoints1.length === 0 || keypoints2.length === 0) {
return matches;
}
var matches1 = tracking.Brief.match(keypoints1, descriptors1, keypoints2, descriptors2);
var matches2 = tracking.Brief.match(keypoints2, descriptors2, keypoints1, descriptors1);
for (var i = 0; i < matches1.length; i++) {
if (matches2[matches1[i].index2].index2 === i) {
matches.push(matches1[i]);
}
}
return matches;
};
/**
* Gets the coordinates values of (x,y)-location pairs uniquely chosen
* during the initialization.
* @return {array} Array with the random offset values.
* @private
*/
tracking.Brief.getRandomOffsets_ = function(width) {
if (!this.randomWindowOffsets_) {
var windowPosition = 0;
var windowOffsets = new Int32Array(4 * this.N);
for (var i = 0; i < this.N; i++) {
windowOffsets[windowPosition++] = Math.round(tracking.Math.uniformRandom(-15, 16));
windowOffsets[windowPosition++] = Math.round(tracking.Math.uniformRandom(-15, 16));
windowOffsets[windowPosition++] = Math.round(tracking.Math.uniformRandom(-15, 16));
windowOffsets[windowPosition++] = Math.round(tracking.Math.uniformRandom(-15, 16));
}
this.randomWindowOffsets_ = windowOffsets;
}
if (!this.randomImageOffsets_[width]) {
var imagePosition = 0;
var imageOffsets = new Int32Array(2 * this.N);
for (var j = 0; j < this.N; j++) {
imageOffsets[imagePosition++] = this.randomWindowOffsets_[4 * j] * width + this.randomWindowOffsets_[4 * j + 1];
imageOffsets[imagePosition++] = this.randomWindowOffsets_[4 * j + 2] * width + this.randomWindowOffsets_[4 * j + 3];
}
this.randomImageOffsets_[width] = imageOffsets;
}
return this.randomImageOffsets_[width];
};
}());
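
Putting Fast and Brief together gives the usual matching pipeline. A minimal sketch, assuming `gray1` and `gray2` are grayscale arrays of the same width (e.g. produced by tracking.Image.grayscale):

var corners1 = tracking.Fast.findCorners(gray1, width, height);
var corners2 = tracking.Fast.findCorners(gray2, width, height);
var descriptors1 = tracking.Brief.getDescriptors(gray1, width, corners1);
var descriptors2 = tracking.Brief.getDescriptors(gray2, width, corners2);
// Keep only matches that agree in both directions.
var matches = tracking.Brief.reciprocalMatch(corners1, descriptors1, corners2, descriptors2);
matches.forEach(function(match) {
  // match.keypoint1 and match.keypoint2 are [x, y]; confidence is in [0, 1].
  console.log(match.keypoint1, '->', match.keypoint2, match.confidence);
});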

View File

@ -0,0 +1,250 @@
(function() {
/**
* FAST stands for "Features from Accelerated Segment Test". This method
* performs a point segment test corner detection. The segment test
* criterion operates by considering a circle of sixteen pixels around the
* corner candidate p. The detector classifies p as a corner if there exists
* a set of n contiguous pixels in the circle which are all brighter than the
* intensity of the candidate pixel Ip plus a threshold t, or all darker
* than Ip - t.
*
* 15 00 01
* 14 02
* 13 03
* 12 [] 04
* 11 05
* 10 06
* 09 08 07
*
* For more reference:
* http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.60.3991&rep=rep1&type=pdf
* @static
* @constructor
*/
tracking.Fast = {};
/**
* Holds the threshold to determine whether the tested pixel is brighter or
* darker than the corner candidate p.
* @type {number}
* @default 40
* @static
*/
tracking.Fast.THRESHOLD = 40;
/**
* Caches coordinates values of the circle surrounding the pixel candidate p.
* @type {Object.<number, Int32Array>}
* @private
* @static
*/
tracking.Fast.circles_ = {};
/**
* Finds corner coordinates on the grayscaled image.
* @param {array} pixels The grayscale pixels in a linear [p1,p2,...] array.
* @param {number} width The image width.
* @param {number} height The image height.
* @param {number} opt_threshold Optional threshold to determine whether the
* tested pixel is brighter or darker than the corner candidate p. Defaults to 40.
* @return {array} Array containing the coordinates of all found corners,
* e.g. [x0,y0,x1,y1,...], where P(x0,y0) represents a corner coordinate.
* @static
*/
tracking.Fast.findCorners = function(pixels, width, height, opt_threshold) {
var circleOffsets = this.getCircleOffsets_(width);
var circlePixels = new Int32Array(16);
var corners = [];
if (opt_threshold === undefined) {
opt_threshold = this.THRESHOLD;
}
// When looping through the image pixels, skips the first three lines from
// the image boundaries to constrain the surrounding circle inside the image
// area.
for (var i = 3; i < height - 3; i++) {
for (var j = 3; j < width - 3; j++) {
var w = i * width + j;
var p = pixels[w];
// Loops the circle offsets to read the pixel value for the sixteen
// surrounding pixels.
for (var k = 0; k < 16; k++) {
circlePixels[k] = pixels[w + circleOffsets[k]];
}
if (this.isCorner(p, circlePixels, opt_threshold)) {
// The pixel p is classified as a corner, as optimization increment j
// by the circle radius 3 to skip the neighbor pixels inside the
// surrounding circle. This can be removed without compromising the
// result.
corners.push(j, i);
j += 3;
}
}
}
return corners;
};
/**
* Checks if the circle pixel is brighter than the candidate pixel p by
* a threshold.
* @param {number} circlePixel The circle pixel value.
* @param {number} p The value of the candidate pixel p.
* @param {number} threshold
* @return {Boolean}
* @static
*/
tracking.Fast.isBrighter = function(circlePixel, p, threshold) {
return circlePixel - p > threshold;
};
/**
* Checks if the candidate pixel p is a corner: whether some run of nine
* contiguous circle pixels is entirely brighter or entirely darker than p
* by the threshold.
* @param {number} p The value of the candidate pixel p.
* @param {array} circlePixels The values of the sixteen surrounding pixels.
* @param {number} threshold
* @return {Boolean}
* @static
*/
tracking.Fast.isCorner = function(p, circlePixels, threshold) {
if (this.isTriviallyExcluded(circlePixels, p, threshold)) {
return false;
}
for (var x = 0; x < 16; x++) {
var darker = true;
var brighter = true;
for (var y = 0; y < 9; y++) {
var circlePixel = circlePixels[(x + y) & 15];
// compare as (circlePixel, p) so the brighter/darker flags match the helper semantics
if (!this.isBrighter(circlePixel, p, threshold)) {
brighter = false;
if (darker === false) {
break;
}
}
if (!this.isDarker(circlePixel, p, threshold)) {
darker = false;
if (brighter === false) {
break;
}
}
}
if (brighter || darker) {
return true;
}
}
return false;
};
/**
* Checks if the circle pixel is darker than the candidate pixel p by
* a threshold.
* @param {number} circlePixel The circle pixel value.
* @param {number} p The value of the candidate pixel p.
* @param {number} threshold
* @return {Boolean}
* @static
*/
tracking.Fast.isDarker = function(circlePixel, p, threshold) {
return p - circlePixel > threshold;
};
/**
* Fast check to test if the candidate pixel can be trivially excluded. In
* order to be a corner, the candidate pixel value should be darker or
* brighter than 9-12 surrounding pixels; when fewer than three of the top,
* bottom, left and right pixels are brighter (or darker) than the candidate,
* it can be excluded right away, improving performance.
* @param {array} circlePixels The values of the sixteen surrounding pixels.
* @param {number} p The value of the candidate pixel p.
* @param {number} threshold
* @return {Boolean}
* @static
* @protected
*/
tracking.Fast.isTriviallyExcluded = function(circlePixels, p, threshold) {
var count = 0;
var circleBottom = circlePixels[8];
var circleLeft = circlePixels[12];
var circleRight = circlePixels[4];
var circleTop = circlePixels[0];
if (this.isBrighter(circleTop, p, threshold)) {
count++;
}
if (this.isBrighter(circleRight, p, threshold)) {
count++;
}
if (this.isBrighter(circleBottom, p, threshold)) {
count++;
}
if (this.isBrighter(circleLeft, p, threshold)) {
count++;
}
if (count < 3) {
count = 0;
if (this.isDarker(circleTop, p, threshold)) {
count++;
}
if (this.isDarker(circleRight, p, threshold)) {
count++;
}
if (this.isDarker(circleBottom, p, threshold)) {
count++;
}
if (this.isDarker(circleLeft, p, threshold)) {
count++;
}
if (count < 3) {
return true;
}
}
return false;
};
/**
* Gets the sixteen offset values of the circle surrounding pixel.
* @param {number} width The image width.
* @return {array} Array with the sixteen offset values of the circle
* surrounding pixel.
* @private
*/
tracking.Fast.getCircleOffsets_ = function(width) {
if (this.circles_[width]) {
return this.circles_[width];
}
var circle = new Int32Array(16);
circle[0] = -width - width - width;
circle[1] = circle[0] + 1;
circle[2] = circle[1] + width + 1;
circle[3] = circle[2] + width + 1;
circle[4] = circle[3] + width;
circle[5] = circle[4] + width;
circle[6] = circle[5] + width - 1;
circle[7] = circle[6] + width - 1;
circle[8] = circle[7] - 1;
circle[9] = circle[8] - 1;
circle[10] = circle[9] - width - 1;
circle[11] = circle[10] - width - 1;
circle[12] = circle[11] - width;
circle[13] = circle[12] - width;
circle[14] = circle[13] - width + 1;
circle[15] = circle[14] - width + 1;
this.circles_[width] = circle;
return circle;
};
}());
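
The circle offsets above are precomputed linear-index deltas for the 16-pixel circle of radius 3, which is why they are cached per image width. Usage is a single call; a sketch with an explicit threshold (assuming `gray` is a grayscale array such as tracking.Image.grayscale produces):

// Lower thresholds accept weaker corners; the default is tracking.Fast.THRESHOLD (40).
var corners = tracking.Fast.findCorners(gray, width, height, 20);
for (var i = 0; i < corners.length; i += 2) {
  console.log('corner at x=' + corners[i] + ', y=' + corners[i + 1]);
}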

View File

@ -0,0 +1,82 @@
(function() {
/**
* Math utility.
* @static
* @constructor
*/
tracking.Math = {};
/**
* Euclidean distance between two points P(x0, y0) and P(x1, y1).
* @param {number} x0 Horizontal coordinate of P0.
* @param {number} y0 Vertical coordinate of P0.
* @param {number} x1 Horizontal coordinate of P1.
* @param {number} y1 Vertical coordinate of P1.
* @return {number} The euclidean distance.
*/
tracking.Math.distance = function(x0, y0, x1, y1) {
var dx = x1 - x0;
var dy = y1 - y0;
return Math.sqrt(dx * dx + dy * dy);
};
/**
* Calculates the Hamming weight of a string, which is the number of symbols that are
* different from the zero-symbol of the alphabet used. It is thus
* equivalent to the Hamming distance from the all-zero string of the same
* length. For the most typical case, a string of bits, this is the number
* of 1's in the string.
*
* Example:
*
* <pre>
* Binary string Hamming weight
* 11101 4
* 11101010 5
* </pre>
*
* @param {number} i Number that holds the binary string to extract the hamming weight.
* @return {number} The hamming weight.
*/
tracking.Math.hammingWeight = function(i) {
i = i - ((i >> 1) & 0x55555555);
i = (i & 0x33333333) + ((i >> 2) & 0x33333333);
return ((i + (i >> 4) & 0xF0F0F0F) * 0x1010101) >> 24;
};
/**
* Generates a random number in the [a, b) interval.
* @param {number} a
* @param {number} b
* @return {number}
*/
tracking.Math.uniformRandom = function(a, b) {
return a + Math.random() * (b - a);
};
/**
* Tests if a rectangle intersects with another.
*
* <pre>
* x0y0 -------- x2y2 --------
* | | | |
* -------- x1y1 -------- x3y3
* </pre>
*
* @param {number} x0 Horizontal coordinate of P0.
* @param {number} y0 Vertical coordinate of P0.
* @param {number} x1 Horizontal coordinate of P1.
* @param {number} y1 Vertical coordinate of P1.
* @param {number} x2 Horizontal coordinate of P2.
* @param {number} y2 Vertical coordinate of P2.
* @param {number} x3 Horizontal coordinate of P3.
* @param {number} y3 Vertical coordinate of P3.
* @return {boolean}
*/
tracking.Math.intersectRect = function(x0, y0, x1, y1, x2, y2, x3, y3) {
return !(x2 > x1 || x3 < x0 || y2 > y1 || y3 < y0);
};
}());
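
A few quick checks of the utilities above, using only the functions just
defined:

tracking.Math.distance(0, 0, 3, 4);                   // 5 (3-4-5 triangle)
tracking.Math.hammingWeight(29);                      // 4 (29 = 0b11101)
tracking.Math.intersectRect(0, 0, 1, 1, 1, 1, 2, 2);  // true: touching corners count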

View File

@ -0,0 +1,185 @@
(function() {
/**
* Matrix utility.
* @static
* @constructor
*/
tracking.Matrix = {};
/**
* Loops through the array organized in row-major order and executes the
* `fn` callback for each iteration. The `fn` callback receives the
* following parameters: `(r,g,b,a,index,i,j)`, where `r,g,b,a` represents
* the pixel color with alpha channel, `index` represents the position in
* the row-major order array and `i,j` the respective index positions in
* two dimensions.
* @param {array} pixels The pixels in a linear [r,g,b,a,...] array to loop
* through.
* @param {number} width The image width.
* @param {number} height The image height.
* @param {function} fn The callback function for each pixel.
* @param {number} opt_jump Optional jump for the iteration; defaults to 1,
* hence looping over all the pixels of the array.
* @static
*/
tracking.Matrix.forEach = function(pixels, width, height, fn, opt_jump) {
opt_jump = opt_jump || 1;
for (var i = 0; i < height; i += opt_jump) {
for (var j = 0; j < width; j += opt_jump) {
var w = i * width * 4 + j * 4;
fn.call(this, pixels[w], pixels[w + 1], pixels[w + 2], pixels[w + 3], w, i, j);
}
}
};
/**
* Calculates the per-element subtraction of two NxM matrices and returns a
* new NxM matrix as the result.
* @param {matrix} a The first matrix.
* @param {matrix} b The second matrix.
* @static
*/
tracking.Matrix.sub = function(a, b){
var res = tracking.Matrix.clone(a);
for(var i=0; i < res.length; i++){
for(var j=0; j < res[i].length; j++){
res[i][j] -= b[i][j];
}
}
return res;
};
/**
* Calculates the per-element sum of two NxM matrices and returns a new
* NxM matrix as the result.
* @param {matrix} a The first matrix.
* @param {matrix} b The second matrix.
* @static
*/
tracking.Matrix.add = function(a, b){
var res = tracking.Matrix.clone(a);
for(var i=0; i < res.length; i++){
for(var j=0; j < res[i].length; j++){
res[i][j] += b[i][j];
}
}
return res;
};
/**
* Clones a matrix (or part of it) and returns a new matrix as the result.
* @param {matrix} src The matrix to be cloned.
* @param {number} width Optional number of columns to clone; defaults to
* the source width.
* @param {number} height Optional number of rows to clone; defaults to the
* source height.
* @static
*/
tracking.Matrix.clone = function(src, width, height){
width = width || src[0].length;
height = height || src.length;
var temp = new Array(height);
var i = height;
while(i--){
temp[i] = new Array(width);
var j = width;
while(j--) temp[i][j] = src[i][j];
}
return temp;
};
/**
* Multiply a matrix by a scalar and returns a new matrix as the result.
* @param {number} scalar The scalar to multiply the matrix by.
* @param {matrix} src The matrix to be multiplied.
* @static
*/
tracking.Matrix.mulScalar = function(scalar, src){
var res = tracking.Matrix.clone(src);
for(var i=0; i < src.length; i++){
for(var j=0; j < src[i].length; j++){
res[i][j] *= scalar;
}
}
return res;
};
/**
* Transpose a matrix and returns a new matrix as the result.
* @param {matrix} src The matrix to be transposed.
* @static
*/
tracking.Matrix.transpose = function(src){
var transpose = new Array(src[0].length);
for(var i=0; i < src[0].length; i++){
transpose[i] = new Array(src.length);
for(var j=0; j < src.length; j++){
transpose[i][j] = src[j][i];
}
}
return transpose;
};
/**
* Multiply an MxN matrix with an NxP matrix and returns a new MxP matrix
* as the result.
* @param {matrix} a The first matrix.
* @param {matrix} b The second matrix.
* @static
*/
tracking.Matrix.mul = function(a, b) {
var res = new Array(a.length);
for (var i = 0; i < a.length; i++) {
res[i] = new Array(b[0].length);
for (var j = 0; j < b[0].length; j++) {
res[i][j] = 0;
for (var k = 0; k < a[0].length; k++) {
res[i][j] += a[i][k] * b[k][j];
}
}
}
return res;
};
/**
* Calculates the Frobenius norm (the square root of the sum of the squared
* entries) of a matrix.
* @param {matrix} src The matrix whose norm will be calculated.
* @static
*/
tracking.Matrix.norm = function(src){
var res = 0;
for(var i=0; i < src.length; i++){
for(var j=0; j < src[i].length; j++){
res += src[i][j]*src[i][j];
}
}
return Math.sqrt(res);
};
/**
* Calculates and returns the covariance matrix of a set of vectors as well
* as the mean of the matrix.
* @param {matrix} src The matrix whose covariance matrix will be calculated.
* @static
*/
tracking.Matrix.calcCovarMatrix = function(src){
var mean = new Array(src.length);
for(var i=0; i < src.length; i++){
mean[i] = [0.0];
for(var j=0; j < src[i].length; j++){
mean[i][0] += src[i][j]/src[i].length;
}
}
var deltaFull = tracking.Matrix.clone(mean);
for(var i=0; i < deltaFull.length; i++){
for(var j=0; j < src[0].length - 1; j++){
deltaFull[i].push(deltaFull[i][0]);
}
}
var a = tracking.Matrix.sub(src, deltaFull);
var b = tracking.Matrix.transpose(a);
var covar = tracking.Matrix.mul(b,a);
return [covar, mean];
};
}());
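
A small worked example of the pure-array matrix helpers above; nothing here
is assumed beyond the functions just defined:

var a = [[1, 2], [3, 4]];
var b = [[5, 6], [7, 8]];
tracking.Matrix.mul(a, b);     // [[19, 22], [43, 50]]
tracking.Matrix.transpose(a);  // [[1, 3], [2, 4]]
tracking.Matrix.sub(b, a);     // [[4, 4], [4, 4]]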

View File

@ -0,0 +1,10 @@
(function() {
/**
* EPnp utility.
* @static
* @constructor
*/
tracking.EPnP = {};
tracking.EPnP.solve = function(objectPoints, imagePoints, cameraMatrix) {};
}());

View File

@ -0,0 +1,425 @@
(function() {
/**
* ColorTracker utility to track colored blobs in a frame using color
* difference evaluation.
* @constructor
* @param {string|Array.<string>} opt_colors Optional colors to track.
* @extends {tracking.Tracker}
*/
tracking.ColorTracker = function(opt_colors) {
tracking.ColorTracker.base(this, 'constructor');
if (typeof opt_colors === 'string') {
opt_colors = [opt_colors];
}
if (opt_colors) {
opt_colors.forEach(function(color) {
if (!tracking.ColorTracker.getColor(color)) {
throw new Error('Color not valid, try `new tracking.ColorTracker("magenta")`.');
}
});
this.setColors(opt_colors);
}
};
tracking.inherits(tracking.ColorTracker, tracking.Tracker);
/**
* Holds the known colors.
* @type {Object.<string, function>}
* @private
* @static
*/
tracking.ColorTracker.knownColors_ = {};
/**
* Caches the coordinate values of the neighbours surrounding a pixel.
* @type {Object.<number, Int32Array>}
* @private
* @static
*/
tracking.ColorTracker.neighbours_ = {};
/**
* Registers a color as known color.
* @param {string} name The color name.
* @param {function} fn The color function to test if the passed (r,g,b) is
* the desired color.
* @static
*/
tracking.ColorTracker.registerColor = function(name, fn) {
tracking.ColorTracker.knownColors_[name] = fn;
};
/**
* Gets the known color function that is able to test whether an (r,g,b) is
* the desired color.
* @param {string} name The color name.
* @return {function} The known color test function.
* @static
*/
tracking.ColorTracker.getColor = function(name) {
return tracking.ColorTracker.knownColors_[name];
};
/**
* Holds the colors to be tracked by the `ColorTracker` instance.
* @default ['magenta']
* @type {Array.<string>}
*/
tracking.ColorTracker.prototype.colors = ['magenta'];
/**
* Holds the minimum dimension to classify a rectangle.
* @default 20
* @type {number}
*/
tracking.ColorTracker.prototype.minDimension = 20;
/**
* Holds the maximum dimension to classify a rectangle.
* @default Infinity
* @type {number}
*/
tracking.ColorTracker.prototype.maxDimension = Infinity;
/**
* Holds the minimum group size to be classified as a rectangle.
* @default 30
* @type {number}
*/
tracking.ColorTracker.prototype.minGroupSize = 30;
/**
* Calculates the bounding box of the cloud points. The cloud points are
* all points that match the desired color.
* @param {Array.<number>} cloud Row-major order array containing all the
* points from the desired color, e.g. [x1, y1, x2, y2, ...].
* @param {number} total Total number of pixels of the desired color.
* @return {object} Object containing the x, y, width and height of the
* blob extracted from the cloud points.
* @private
*/
tracking.ColorTracker.prototype.calculateDimensions_ = function(cloud, total) {
var maxx = -1;
var maxy = -1;
var minx = Infinity;
var miny = Infinity;
for (var c = 0; c < total; c += 2) {
var x = cloud[c];
var y = cloud[c + 1];
if (x < minx) {
minx = x;
}
if (x > maxx) {
maxx = x;
}
if (y < miny) {
miny = y;
}
if (y > maxy) {
maxy = y;
}
}
return {
width: maxx - minx,
height: maxy - miny,
x: minx,
y: miny
};
};
/**
* Gets the colors being tracked by the `ColorTracker` instance.
* @return {Array.<string>}
*/
tracking.ColorTracker.prototype.getColors = function() {
return this.colors;
};
/**
* Gets the minimum dimension to classify a rectangle.
* @return {number}
*/
tracking.ColorTracker.prototype.getMinDimension = function() {
return this.minDimension;
};
/**
* Gets the maximum dimension to classify a rectangle.
* @return {number}
*/
tracking.ColorTracker.prototype.getMaxDimension = function() {
return this.maxDimension;
};
/**
* Gets the minimum group size to be classified as a rectangle.
* @return {number}
*/
tracking.ColorTracker.prototype.getMinGroupSize = function() {
return this.minGroupSize;
};
/**
* Gets the eight offset values of the neighbours surrounding a pixel.
* @param {number} width The image width.
* @return {array} Array with the eight offset values of the neighbours
* surrounding a pixel.
* @private
*/
tracking.ColorTracker.prototype.getNeighboursForWidth_ = function(width) {
if (tracking.ColorTracker.neighbours_[width]) {
return tracking.ColorTracker.neighbours_[width];
}
var neighbours = new Int32Array(8);
neighbours[0] = -width * 4;
neighbours[1] = -width * 4 + 4;
neighbours[2] = 4;
neighbours[3] = width * 4 + 4;
neighbours[4] = width * 4;
neighbours[5] = width * 4 - 4;
neighbours[6] = -4;
neighbours[7] = -width * 4 - 4;
tracking.ColorTracker.neighbours_[width] = neighbours;
return neighbours;
};
/**
* Unites groups whose bounding boxes intersect with each other and filters
* out rectangles outside the min/max dimension bounds.
* @param {Array.<Object>} rects
* @return {Array.<Object>} The merged rectangles.
* @private
*/
tracking.ColorTracker.prototype.mergeRectangles_ = function(rects) {
var intersects;
var results = [];
var minDimension = this.getMinDimension();
var maxDimension = this.getMaxDimension();
for (var r = 0; r < rects.length; r++) {
var r1 = rects[r];
intersects = true;
for (var s = r + 1; s < rects.length; s++) {
var r2 = rects[s];
if (tracking.Math.intersectRect(r1.x, r1.y, r1.x + r1.width, r1.y + r1.height, r2.x, r2.y, r2.x + r2.width, r2.y + r2.height)) {
intersects = false;
var x1 = Math.min(r1.x, r2.x);
var y1 = Math.min(r1.y, r2.y);
var x2 = Math.max(r1.x + r1.width, r2.x + r2.width);
var y2 = Math.max(r1.y + r1.height, r2.y + r2.height);
r2.height = y2 - y1;
r2.width = x2 - x1;
r2.x = x1;
r2.y = y1;
break;
}
}
if (intersects) {
if (r1.width >= minDimension && r1.height >= minDimension) {
if (r1.width <= maxDimension && r1.height <= maxDimension) {
results.push(r1);
}
}
}
}
return results;
};
/**
* Sets the colors to be tracked by the `ColorTracker` instance.
* @param {Array.<string>} colors
*/
tracking.ColorTracker.prototype.setColors = function(colors) {
this.colors = colors;
};
/**
* Sets the minimum dimension to classify a rectangle.
* @param {number} minDimension
*/
tracking.ColorTracker.prototype.setMinDimension = function(minDimension) {
this.minDimension = minDimension;
};
/**
* Sets the maximum dimension to classify a rectangle.
* @param {number} maxDimension
*/
tracking.ColorTracker.prototype.setMaxDimension = function(maxDimension) {
this.maxDimension = maxDimension;
};
/**
* Sets the minimum group size to be classified as a rectangle.
* @param {number} minGroupSize
*/
tracking.ColorTracker.prototype.setMinGroupSize = function(minGroupSize) {
this.minGroupSize = minGroupSize;
};
/**
* Tracks the `Video` frames. This method is called for each video frame in
* order to emit the `track` event.
* @param {Uint8ClampedArray} pixels The pixels data to track.
* @param {number} width The pixels canvas width.
* @param {number} height The pixels canvas height.
*/
tracking.ColorTracker.prototype.track = function(pixels, width, height) {
var self = this;
var colors = this.getColors();
if (!colors) {
throw new Error('Colors not specified, try `new tracking.ColorTracker("magenta")`.');
}
var results = [];
colors.forEach(function(color) {
results = results.concat(self.trackColor_(pixels, width, height, color));
});
this.emit('track', {
data: results
});
};
/**
* Finds the given color in the given matrix of pixels using a flood fill
* algorithm to determine the area connected to a given node in a
* multi-dimensional array.
* @param {Uint8ClampedArray} pixels The pixels data to track.
* @param {number} width The pixels canvas width.
* @param {number} height The pixels canvas height.
* @param {string} color The color to be found.
* @return {Array.<Object>} The merged rectangles of the matched color.
* @private
*/
tracking.ColorTracker.prototype.trackColor_ = function(pixels, width, height, color) {
var colorFn = tracking.ColorTracker.knownColors_[color];
var currGroup = new Int32Array(pixels.length >> 2);
var currGroupSize;
var currI;
var currJ;
var currW;
var marked = new Int8Array(pixels.length);
var minGroupSize = this.getMinGroupSize();
var neighboursW = this.getNeighboursForWidth_(width);
var queue = new Int32Array(pixels.length);
var queuePosition;
var results = [];
var w = -4;
if (!colorFn) {
return results;
}
for (var i = 0; i < height; i++) {
for (var j = 0; j < width; j++) {
w += 4;
if (marked[w]) {
continue;
}
currGroupSize = 0;
queuePosition = -1;
queue[++queuePosition] = w;
queue[++queuePosition] = i;
queue[++queuePosition] = j;
marked[w] = 1;
while (queuePosition >= 0) {
currJ = queue[queuePosition--];
currI = queue[queuePosition--];
currW = queue[queuePosition--];
if (colorFn(pixels[currW], pixels[currW + 1], pixels[currW + 2], pixels[currW + 3], currW, currI, currJ)) {
currGroup[currGroupSize++] = currJ;
currGroup[currGroupSize++] = currI;
for (var k = 0; k < neighboursW.length; k++) {
var otherW = currW + neighboursW[k];
var otherI = currI + neighboursI[k];
var otherJ = currJ + neighboursJ[k];
if (!marked[otherW] && otherI >= 0 && otherI < height && otherJ >= 0 && otherJ < width) {
queue[++queuePosition] = otherW;
queue[++queuePosition] = otherI;
queue[++queuePosition] = otherJ;
marked[otherW] = 1;
}
}
}
}
if (currGroupSize >= minGroupSize) {
var data = this.calculateDimensions_(currGroup, currGroupSize);
if (data) {
data.color = color;
results.push(data);
}
}
}
}
return this.mergeRectangles_(results);
};
// Default colors
//===================
tracking.ColorTracker.registerColor('cyan', function(r, g, b) {
var thresholdGreen = 50,
thresholdBlue = 70,
dx = r - 0,
dy = g - 255,
dz = b - 255;
if ((g - r) >= thresholdGreen && (b - r) >= thresholdBlue) {
return true;
}
return dx * dx + dy * dy + dz * dz < 6400;
});
tracking.ColorTracker.registerColor('magenta', function(r, g, b) {
var threshold = 50,
dx = r - 255,
dy = g - 0,
dz = b - 255;
if ((r - g) >= threshold && (b - g) >= threshold) {
return true;
}
return dx * dx + dy * dy + dz * dz < 19600;
});
tracking.ColorTracker.registerColor('yellow', function(r, g, b) {
var threshold = 50,
dx = r - 255,
dy = g - 255,
dz = b - 0;
if ((r - b) >= threshold && (g - b) >= threshold) {
return true;
}
return dx * dx + dy * dy + dz * dz < 10000;
});
// Caching neighbour i/j offset values.
//=====================================
var neighboursI = new Int32Array([-1, -1, 0, 1, 1, 1, 0, -1]);
var neighboursJ = new Int32Array([0, 1, 1, 1, 0, -1, -1, -1]);
}());
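
A hedged usage sketch tying the pieces above together ('#myVideo' is a
hypothetical selector, and the camera option requires getUserMedia support):

var tracker = new tracking.ColorTracker(['magenta', 'cyan', 'yellow']);
tracker.on('track', function(event) {
  event.data.forEach(function(rect) {
    // rect.x, rect.y, rect.width, rect.height and rect.color per blob
  });
});
tracking.track('#myVideo', tracker, { camera: true });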

View File

@ -0,0 +1,35 @@
(function() {
tracking.LandmarksTracker = function() {
tracking.LandmarksTracker.base(this, 'constructor');
};
tracking.inherits(tracking.LandmarksTracker, tracking.ObjectTracker);
tracking.LandmarksTracker.prototype.track = function(pixels, width, height) {
var image = {
'data': pixels,
'width': width,
'height': height
};
var classifier = tracking.ViolaJones.classifiers['face'];
var faces = tracking.ViolaJones.detect(pixels, width, height,
this.getInitialScale(), this.getScaleFactor(), this.getStepSize(),
this.getEdgesDensity(), classifier);
var landmarks = tracking.LBF.align(pixels, width, height, faces);
this.emit('track', {
'data': {
'faces' : faces,
'landmarks' : landmarks
}
});
};
}());
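
A minimal sketch, assuming the LBF model and the 'face' classifier are
loaded alongside the library ('#myVideo' is a hypothetical selector):

var landmarks = new tracking.LandmarksTracker();
landmarks.on('track', function(event) {
  // event.data.faces: detected face rectangles
  // event.data.landmarks: aligned landmark points per face
});
tracking.track('#myVideo', landmarks, { camera: true });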

View File

@ -0,0 +1,169 @@
(function() {
/**
* ObjectTracker utility.
* @constructor
* @param {string|Array.<string|Array.<number>>} opt_classifiers Optional
* object classifiers to track.
* @extends {tracking.Tracker}
*/
tracking.ObjectTracker = function(opt_classifiers) {
tracking.ObjectTracker.base(this, 'constructor');
if (opt_classifiers) {
if (!Array.isArray(opt_classifiers)) {
opt_classifiers = [opt_classifiers];
}
if (Array.isArray(opt_classifiers)) {
opt_classifiers.forEach(function(classifier, i) {
if (typeof classifier === 'string') {
opt_classifiers[i] = tracking.ViolaJones.classifiers[classifier];
}
if (!opt_classifiers[i]) {
throw new Error('Object classifier not valid, try `new tracking.ObjectTracker("face")`.');
}
});
}
}
this.setClassifiers(opt_classifiers);
};
tracking.inherits(tracking.ObjectTracker, tracking.Tracker);
/**
* Specifies the edges density of a block in order to decide whether to skip
* it or not.
* @default 0.2
* @type {number}
*/
tracking.ObjectTracker.prototype.edgesDensity = 0.2;
/**
* Specifies the initial scale to start the feature block scaling.
* @default 1.0
* @type {number}
*/
tracking.ObjectTracker.prototype.initialScale = 1.0;
/**
* Specifies the scale factor to scale the feature block.
* @default 1.25
* @type {number}
*/
tracking.ObjectTracker.prototype.scaleFactor = 1.25;
/**
* Specifies the block step size.
* @default 1.5
* @type {number}
*/
tracking.ObjectTracker.prototype.stepSize = 1.5;
/**
* Gets the tracker HAAR classifiers.
* @return {TypedArray.<number>}
*/
tracking.ObjectTracker.prototype.getClassifiers = function() {
return this.classifiers;
};
/**
* Gets the edges density value.
* @return {number}
*/
tracking.ObjectTracker.prototype.getEdgesDensity = function() {
return this.edgesDensity;
};
/**
* Gets the initial scale to start the feature block scaling.
* @return {number}
*/
tracking.ObjectTracker.prototype.getInitialScale = function() {
return this.initialScale;
};
/**
* Gets the scale factor to scale the feature block.
* @return {number}
*/
tracking.ObjectTracker.prototype.getScaleFactor = function() {
return this.scaleFactor;
};
/**
* Gets the block step size.
* @return {number}
*/
tracking.ObjectTracker.prototype.getStepSize = function() {
return this.stepSize;
};
/**
* Tracks the `Video` frames. This method is called for each video frame in
* order to emit the `track` event.
* @param {Uint8ClampedArray} pixels The pixels data to track.
* @param {number} width The pixels canvas width.
* @param {number} height The pixels canvas height.
*/
tracking.ObjectTracker.prototype.track = function(pixels, width, height) {
var self = this;
var classifiers = this.getClassifiers();
if (!classifiers) {
throw new Error('Object classifier not specified, try `new tracking.ObjectTracker("face")`.');
}
var results = [];
classifiers.forEach(function(classifier) {
results = results.concat(tracking.ViolaJones.detect(pixels, width, height, self.getInitialScale(), self.getScaleFactor(), self.getStepSize(), self.getEdgesDensity(), classifier));
});
this.emit('track', {
data: results
});
};
/**
* Sets the tracker HAAR classifiers.
* @param {TypedArray.<number>} classifiers
*/
tracking.ObjectTracker.prototype.setClassifiers = function(classifiers) {
this.classifiers = classifiers;
};
/**
* Sets the edges density.
* @param {number} edgesDensity
*/
tracking.ObjectTracker.prototype.setEdgesDensity = function(edgesDensity) {
this.edgesDensity = edgesDensity;
};
/**
* Sets the initial scale to start the block scaling.
* @param {number} initialScale
*/
tracking.ObjectTracker.prototype.setInitialScale = function(initialScale) {
this.initialScale = initialScale;
};
/**
* Sets the scale factor to scale the feature block.
* @param {number} scaleFactor
*/
tracking.ObjectTracker.prototype.setScaleFactor = function(scaleFactor) {
this.scaleFactor = scaleFactor;
};
/**
* Sets the block step size.
* @param {number} stepSize
*/
tracking.ObjectTracker.prototype.setStepSize = function(stepSize) {
this.stepSize = stepSize;
};
}());
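
A hedged usage sketch for the tracker above ('#myImage' is a hypothetical
selector; 'face' must be among the bundled Viola-Jones classifiers):

var faces = new tracking.ObjectTracker('face');
faces.setInitialScale(4);
faces.setStepSize(2);
faces.on('track', function(event) {
  event.data.forEach(function(rect) {
    // rect.x, rect.y, rect.width and rect.height of each detection
  });
});
tracking.track('#myImage', faces);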

View File

@ -0,0 +1,21 @@
(function() {
/**
* Tracker utility.
* @constructor
* @extends {tracking.EventEmitter}
*/
tracking.Tracker = function() {
tracking.Tracker.base(this, 'constructor');
};
tracking.inherits(tracking.Tracker, tracking.EventEmitter);
/**
* Tracks the pixels on the array. This method is called for each video
* frame in order to emit the `track` event.
* @param {Uint8ClampedArray} pixels The pixels data to track.
* @param {number} width The pixels canvas width.
* @param {number} height The pixels canvas height.
*/
tracking.Tracker.prototype.track = function() {};
}());

View File

@ -0,0 +1,103 @@
(function() {
/**
* TrackerTask utility.
* @constructor
* @extends {tracking.EventEmitter}
*/
tracking.TrackerTask = function(tracker) {
tracking.TrackerTask.base(this, 'constructor');
if (!tracker) {
throw new Error('Tracker instance not specified.');
}
this.setTracker(tracker);
};
tracking.inherits(tracking.TrackerTask, tracking.EventEmitter);
/**
* Holds the tracker instance managed by this task.
* @type {tracking.Tracker}
* @private
*/
tracking.TrackerTask.prototype.tracker_ = null;
/**
* Holds whether the tracker task is running.
* @type {boolean}
* @private
*/
tracking.TrackerTask.prototype.running_ = false;
/**
* Gets the tracker instance managed by this task.
* @return {tracking.Tracker}
*/
tracking.TrackerTask.prototype.getTracker = function() {
return this.tracker_;
};
/**
* Returns true if the tracker task is running, false otherwise.
* @return {boolean}
* @private
*/
tracking.TrackerTask.prototype.inRunning = function() {
return this.running_;
};
/**
* Sets whether the tracker task is running.
* @param {boolean} running
* @private
*/
tracking.TrackerTask.prototype.setRunning = function(running) {
this.running_ = running;
};
/**
* Sets the tracker instance managed by this task.
* @param {tracking.Tracker} tracker The tracker instance.
*/
tracking.TrackerTask.prototype.setTracker = function(tracker) {
this.tracker_ = tracker;
};
/**
* Emits a `run` event on the tracker task for the implementers to run any
* child action, e.g. `requestAnimationFrame`.
* @return {object} Returns itself, so calls can be chained.
*/
tracking.TrackerTask.prototype.run = function() {
var self = this;
if (this.inRunning()) {
return;
}
this.setRunning(true);
this.reemitTrackEvent_ = function(event) {
self.emit('track', event);
};
this.tracker_.on('track', this.reemitTrackEvent_);
this.emit('run');
return this;
};
/**
* Emits a `stop` event on the tracker task for the implementers to stop any
* child action being done, e.g. `requestAnimationFrame`.
* @return {object} Returns itself, so calls can be chained.
*/
tracking.TrackerTask.prototype.stop = function() {
if (!this.inRunning()) {
return;
}
this.setRunning(false);
this.emit('stop');
this.tracker_.removeListener('track', this.reemitTrackEvent_);
return this;
};
}());
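
In practice a TrackerTask is usually obtained from `tracking.track` (shown
in the next file) rather than constructed directly; a hedged sketch:

var task = new tracking.TrackerTask(tracker);
task.on('run', function() {
  // schedule the tracking loop, e.g. via requestAnimationFrame
});
task.on('stop', function() {
  // cancel the scheduled loop
});
task.run();   // emits 'run' and re-emits the tracker's 'track' events
task.stop();  // emits 'stop' and unwires the re-emission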

View File

@ -0,0 +1,285 @@
(function(window, undefined) {
window.tracking = window.tracking || {};
/**
* Inherit the prototype methods from one constructor into another.
*
* Usage:
* <pre>
* function ParentClass(a, b) { }
* ParentClass.prototype.foo = function(a) { }
*
* function ChildClass(a, b, c) {
* ChildClass.base(this, 'constructor', a, b);
* }
* tracking.inherits(ChildClass, ParentClass);
*
* var child = new ChildClass('a', 'b', 'c');
* child.foo();
* </pre>
*
* @param {Function} childCtor Child class.
* @param {Function} parentCtor Parent class.
*/
tracking.inherits = function(childCtor, parentCtor) {
function TempCtor() {
}
TempCtor.prototype = parentCtor.prototype;
childCtor.superClass_ = parentCtor.prototype;
childCtor.prototype = new TempCtor();
childCtor.prototype.constructor = childCtor;
/**
* Calls superclass constructor/method.
*
* This function is only available if you use tracking.inherits to express
* inheritance relationships between classes.
*
* @param {!object} me Should always be "this".
* @param {string} methodName The method name to call. Calling superclass
* constructor can be done with the special string 'constructor'.
* @param {...*} var_args The arguments to pass to superclass
* method/constructor.
* @return {*} The return value of the superclass method/constructor.
*/
childCtor.base = function(me, methodName) {
var args = Array.prototype.slice.call(arguments, 2);
return parentCtor.prototype[methodName].apply(me, args);
};
};
/**
* Captures the user camera when tracking a video element and sets its
* source to the camera stream.
* @param {HTMLVideoElement} element Video element to track.
* @param {object} opt_options Optional configuration to the tracker.
*/
tracking.initUserMedia_ = function(element, opt_options) {
window.navigator.mediaDevices.getUserMedia({
video: true,
audio: (opt_options && opt_options.audio) ? true : false,
}).then(function(stream) {
element.srcObject = stream;
}).catch(function(err) {
throw Error('Cannot capture user camera.');
});
};
/**
* Tests whether the object is a dom node.
* @param {object} o Object to be tested.
* @return {boolean} True if the object is a dom node.
*/
tracking.isNode = function(o) {
return o.nodeType || this.isWindow(o);
};
/**
* Tests whether the object is the `window` object.
* @param {object} o Object to be tested.
* @return {boolean} True if the object is the `window` object.
*/
tracking.isWindow = function(o) {
return !!(o && o.alert && o.document);
};
/**
* Selects a dom node from a CSS3 selector using `document.querySelector`.
* @param {string} selector
* @param {object} opt_element The root element for the query. When not
* specified `document` is used as root element.
* @return {HTMLElement} The first dom element that matches to the selector.
* If not found, returns `null`.
*/
tracking.one = function(selector, opt_element) {
if (this.isNode(selector)) {
return selector;
}
return (opt_element || document).querySelector(selector);
};
/**
* Tracks a canvas, image or video element based on the specified `tracker`
* instance. This method extracts the pixel information of the input element
* to pass to the `tracker` instance. When tracking a video, the
* `tracker.track(pixels, width, height)` will be in a
* `requestAnimationFrame` loop in order to track all video frames.
*
* Example:
* var tracker = new tracking.ColorTracker();
*
* tracking.track('#video', tracker);
* or
* tracking.track('#video', tracker, { camera: true });
*
* tracker.on('track', function(event) {
* // console.log(event.data[0].x, event.data[0].y)
* });
*
* @param {HTMLElement} element The element to track, canvas, image or
* video.
* @param {tracking.Tracker} tracker The tracker instance used to track the
* element.
* @param {object} opt_options Optional configuration to the tracker.
*/
tracking.track = function(element, tracker, opt_options) {
element = tracking.one(element);
if (!element) {
throw new Error('Element not found, try a different element or selector.');
}
if (!tracker) {
throw new Error('Tracker not specified, try `tracking.track(element, new tracking.FaceTracker())`.');
}
switch (element.nodeName.toLowerCase()) {
case 'canvas':
return this.trackCanvas_(element, tracker, opt_options);
case 'img':
return this.trackImg_(element, tracker, opt_options);
case 'video':
if (opt_options) {
if (opt_options.camera) {
this.initUserMedia_(element, opt_options);
}
}
return this.trackVideo_(element, tracker, opt_options);
default:
throw new Error('Element not supported, try in a canvas, img, or video.');
}
};
/**
* Tracks a canvas element based on the specified `tracker` instance and
* returns a `TrackerTask` for this track.
* @param {HTMLCanvasElement} element Canvas element to track.
* @param {tracking.Tracker} tracker The tracker instance used to track the
* element.
* @param {object} opt_options Optional configuration to the tracker.
* @return {tracking.TrackerTask}
* @private
*/
tracking.trackCanvas_ = function(element, tracker) {
var self = this;
var task = new tracking.TrackerTask(tracker);
task.on('run', function() {
self.trackCanvasInternal_(element, tracker);
});
return task.run();
};
/**
* Tracks a canvas element based on the specified `tracker` instance. This
* method extracts the pixel information of the input element to pass to the
* `tracker` instance.
* @param {HTMLCanvasElement} element Canvas element to track.
* @param {tracking.Tracker} tracker The tracker instance used to track the
* element.
* @param {object} opt_options Optional configuration to the tracker.
* @private
*/
tracking.trackCanvasInternal_ = function(element, tracker) {
var width = element.width;
var height = element.height;
var context = element.getContext('2d');
var imageData = context.getImageData(0, 0, width, height);
tracker.track(imageData.data, width, height);
};
/**
* Tracks an image element based on the specified `tracker` instance. This
* method extracts the pixel information of the input element to pass to the
* `tracker` instance.
* @param {HTMLImageElement} element Image element to track.
* @param {tracking.Tracker} tracker The tracker instance used to track the
* element.
* @param {object} opt_options Optional configuration to the tracker.
* @private
*/
tracking.trackImg_ = function(element, tracker) {
var width = element.naturalWidth;
var height = element.naturalHeight;
var canvas = document.createElement('canvas');
canvas.width = width;
canvas.height = height;
var task = new tracking.TrackerTask(tracker);
task.on('run', function() {
tracking.Canvas.loadImage(canvas, element.src, 0, 0, width, height, function() {
tracking.trackCanvasInternal_(canvas, tracker);
});
});
return task.run();
};
/**
* Tracks a video element based on the specified `tracker` instance. This
* method extracts the pixel information of the input element to pass to the
* `tracker` instance. The `tracker.track(pixels, width, height)` will be in
* a `requestAnimationFrame` loop in order to track all video frames.
* @param {HTMLVideoElement} element Video element to track.
* @param {tracking.Tracker} tracker The tracker instance used to track the
* element.
* @param {object} opt_options Optional configuration to the tracker.
* @private
*/
tracking.trackVideo_ = function(element, tracker) {
var canvas = document.createElement('canvas');
var context = canvas.getContext('2d');
var width;
var height;
// FIXME: the analysed size is the video display size, not the intrinsic video size
var resizeCanvas_ = function() {
width = element.offsetWidth;
height = element.offsetHeight;
canvas.width = width;
canvas.height = height;
};
resizeCanvas_();
element.addEventListener('resize', resizeCanvas_);
// FIXME: expose a process function - it is up to the caller to handle the frequency of detection;
// it seems all handled in the tracking.TrackerTask..
// so in short, remove the tracking.TrackerTask from here;
// if the user wants to use it, they can create it themselves
var requestId;
var requestAnimationFrame_ = function() {
requestId = window.requestAnimationFrame(function() {
if (element.readyState === element.HAVE_ENOUGH_DATA) {
try {
// Firefox v~30.0 gets confused with the video readyState firing an
// erroneous HAVE_ENOUGH_DATA just before HAVE_CURRENT_DATA state,
// hence keep trying to read it until resolved.
context.drawImage(element, 0, 0, width, height);
} catch (err) {}
tracking.trackCanvasInternal_(canvas, tracker);
}
requestAnimationFrame_();
});
};
var task = new tracking.TrackerTask(tracker);
task.on('stop', function() {
window.cancelAnimationFrame(requestId);
});
task.on('run', function() {
requestAnimationFrame_();
});
return task.run();
};
// Browser polyfills
//===================
if (!window.URL) {
window.URL = window.URL || window.webkitURL || window.msURL || window.oURL;
}
if (!navigator.getUserMedia) {
navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia ||
navigator.mozGetUserMedia || navigator.msGetUserMedia;
}
}(window));
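
A hedged sketch of wiring a custom tracker into this pipeline; DummyTracker
and '#myCanvas' are hypothetical names:

function DummyTracker() {
  DummyTracker.base(this, 'constructor');
}
tracking.inherits(DummyTracker, tracking.Tracker);

DummyTracker.prototype.track = function(pixels, width, height) {
  // Emit the frame's mean alpha value as a trivial "result".
  var sum = 0;
  for (var i = 3; i < pixels.length; i += 4) {
    sum += pixels[i];
  }
  this.emit('track', { data: sum / (width * height) });
};

tracking.track('#myCanvas', new DummyTracker());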

View File

@ -0,0 +1,37 @@
(function() {
/**
* Canvas utility.
* @static
* @constructor
*/
tracking.Canvas = {};
/**
* Loads an image source into the canvas.
* @param {HTMLCanvasElement} canvas The canvas dom element.
* @param {string} src The image source.
* @param {number} x The canvas horizontal coordinate to load the image.
* @param {number} y The canvas vertical coordinate to load the image.
* @param {number} width The image width.
* @param {number} height The image height.
* @param {function} opt_callback Callback that fires when the image is loaded
* into the canvas.
* @static
*/
tracking.Canvas.loadImage = function(canvas, src, x, y, width, height, opt_callback) {
var instance = this;
var img = new window.Image();
img.crossOrigin = '*';
img.onload = function() {
var context = canvas.getContext('2d');
canvas.width = width;
canvas.height = height;
context.drawImage(img, x, y, width, height);
if (opt_callback) {
opt_callback.call(instance);
}
img = null;
};
img.src = src;
};
}());
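
A short usage sketch ('img/example.png' and the 320x240 size are
hypothetical):

var canvas = document.createElement('canvas');
tracking.Canvas.loadImage(canvas, 'img/example.png', 0, 0, 320, 240, function() {
  // the canvas now holds the scaled image and can be handed to a tracker
});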

View File

@ -0,0 +1,60 @@
(function() {
/**
* DisjointSet utility with path compression. Some applications involve
* grouping n distinct objects into a collection of disjoint sets. Two
* important operations are then finding which set a given object belongs to
* and uniting the two sets. A disjoint set data structure maintains a
* collection S={ S1 , S2 ,..., Sk } of disjoint dynamic sets. Each set is
* identified by a representative, which usually is a member in the set.
* @static
* @constructor
*/
tracking.DisjointSet = function(length) {
if (length === undefined) {
throw new Error('DisjointSet length not specified.');
}
this.length = length;
this.parent = new Uint32Array(length);
for (var i = 0; i < length; i++) {
this.parent[i] = i;
}
};
/**
* Holds the length of the internal set.
* @type {number}
*/
tracking.DisjointSet.prototype.length = null;
/**
* Holds the set containing the representative values.
* @type {Array.<number>}
*/
tracking.DisjointSet.prototype.parent = null;
/**
* Finds a pointer to the representative of the set containing i.
* @param {number} i
* @return {number} The representative set of i.
*/
tracking.DisjointSet.prototype.find = function(i) {
if (this.parent[i] === i) {
return i;
} else {
return (this.parent[i] = this.find(this.parent[i]));
}
};
/**
* Unites two dynamic sets containing objects i and j, say Si and Sj, into
* a new set Si ∪ Sj, assuming that Si ∩ Sj = ∅.
* @param {number} i
* @param {number} j
*/
tracking.DisjointSet.prototype.union = function(i, j) {
var iRepresentative = this.find(i);
var jRepresentative = this.find(j);
this.parent[iRepresentative] = jRepresentative;
};
}());
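
A tiny worked example of union/find with path compression:

var set = new tracking.DisjointSet(5);  // five singleton sets {0}..{4}
set.union(0, 1);
set.union(1, 2);
set.find(0) === set.find(2);  // true: 0, 1 and 2 share a representative
set.find(3) === set.find(4);  // false: 3 and 4 are still disjoint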

View File

@ -0,0 +1,149 @@
(function() {
/**
* EventEmitter utility.
* @constructor
*/
tracking.EventEmitter = function() {};
/**
* Holds event listeners scoped by event type.
* @type {object}
* @private
*/
tracking.EventEmitter.prototype.events_ = null;
/**
* Adds a listener to the end of the listeners array for the specified event.
* @param {string} event
* @param {function} listener
* @return {object} Returns emitter, so calls can be chained.
*/
tracking.EventEmitter.prototype.addListener = function(event, listener) {
if (typeof listener !== 'function') {
throw new TypeError('Listener must be a function');
}
if (!this.events_) {
this.events_ = {};
}
this.emit('newListener', event, listener);
if (!this.events_[event]) {
this.events_[event] = [];
}
this.events_[event].push(listener);
return this;
};
/**
* Returns an array of listeners for the specified event.
* @param {string} event
* @return {array} Array of listeners.
*/
tracking.EventEmitter.prototype.listeners = function(event) {
return this.events_ && this.events_[event];
};
/**
* Execute each of the listeners in order with the supplied arguments.
* @param {string} event
* @param {*} opt_args [arg1], [arg2], [...]
* @return {boolean} Returns true if event had listeners, false otherwise.
*/
tracking.EventEmitter.prototype.emit = function(event) {
var listeners = this.listeners(event);
if (listeners) {
var args = Array.prototype.slice.call(arguments, 1);
for (var i = 0; i < listeners.length; i++) {
if (listeners[i]) {
listeners[i].apply(this, args);
}
}
return true;
}
return false;
};
/**
* Adds a listener to the end of the listeners array for the specified event.
* @param {string} event
* @param {function} listener
* @return {object} Returns emitter, so calls can be chained.
*/
tracking.EventEmitter.prototype.on = tracking.EventEmitter.prototype.addListener;
/**
* Adds a one time listener for the event. This listener is invoked only the
* next time the event is fired, after which it is removed.
* @param {string} event
* @param {function} listener
* @return {object} Returns emitter, so calls can be chained.
*/
tracking.EventEmitter.prototype.once = function(event, listener) {
var self = this;
self.on(event, function handlerInternal() {
self.removeListener(event, handlerInternal);
listener.apply(this, arguments);
});
};
/**
* Removes all listeners, or those of the specified event. It's not a good
* idea to remove listeners that were added elsewhere in the code,
* especially when it's on an emitter that you didn't create.
* @param {string} opt_event Optional event type; when omitted, listeners
* of all events are removed.
* @return {object} Returns emitter, so calls can be chained.
*/
tracking.EventEmitter.prototype.removeAllListeners = function(opt_event) {
if (!this.events_) {
return this;
}
if (opt_event) {
delete this.events_[opt_event];
} else {
delete this.events_;
}
return this;
};
/**
* Remove a listener from the listener array for the specified event.
* Caution: changes array indices in the listener array behind the listener.
* @param {string} event
* @param {function} listener
* @return {object} Returns emitter, so calls can be chained.
*/
tracking.EventEmitter.prototype.removeListener = function(event, listener) {
if (typeof listener !== 'function') {
throw new TypeError('Listener must be a function');
}
if (!this.events_) {
return this;
}
var listeners = this.listeners(event);
if (Array.isArray(listeners)) {
var i = listeners.indexOf(listener);
if (i < 0) {
return this;
}
listeners.splice(i, 1);
}
return this;
};
/**
* By default EventEmitters will print a warning if more than 10 listeners
* are added for a particular event. This is a useful default which helps
* finding memory leaks. Obviously not all Emitters should be limited to 10.
* This function allows that to be increased. Set to zero for unlimited.
* @param {number} n The maximum number of listeners.
*/
tracking.EventEmitter.prototype.setMaxListeners = function() {
throw new Error('Not implemented');
};
}());
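
A brief sketch of the one-time listener semantics above:

var emitter = new tracking.EventEmitter();
emitter.once('ready', function(value) {
  // runs a single time with value === 42, then removes itself
});
emitter.emit('ready', 42);  // invokes the listener
emitter.emit('ready', 42);  // the listener has already been removed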

View File

@ -0,0 +1,392 @@
(function() {
/**
* Image utility.
* @static
* @constructor
*/
tracking.Image = {};
/**
* Computes gaussian blur. Adapted from
* https://github.com/kig/canvasfilters.
* @param {pixels} pixels The pixels in a linear [r,g,b,a,...] array.
* @param {number} width The image width.
* @param {number} height The image height.
* @param {number} diameter Gaussian blur diameter, must be greater than 1.
* @return {array} The blurred pixels in a linear [r,g,b,a,...] array.
*/
tracking.Image.blur = function(pixels, width, height, diameter) {
diameter = Math.abs(diameter);
if (diameter <= 1) {
throw new Error('Diameter should be greater than 1.');
}
var radius = diameter / 2;
var len = Math.ceil(diameter) + (1 - (Math.ceil(diameter) % 2));
var weights = new Float32Array(len);
var rho = (radius + 0.5) / 3;
var rhoSq = rho * rho;
var gaussianFactor = 1 / Math.sqrt(2 * Math.PI * rhoSq);
var rhoFactor = -1 / (2 * rho * rho);
var wsum = 0;
var middle = Math.floor(len / 2);
for (var i = 0; i < len; i++) {
var x = i - middle;
var gx = gaussianFactor * Math.exp(x * x * rhoFactor);
weights[i] = gx;
wsum += gx;
}
for (var j = 0; j < weights.length; j++) {
weights[j] /= wsum;
}
return this.separableConvolve(pixels, width, height, weights, weights, false);
};
/**
* Computes the integral image for summed, squared, rotated and sobel pixels.
* @param {array} pixels The pixels in a linear [r,g,b,a,...] array to loop
* through.
* @param {number} width The image width.
* @param {number} height The image height.
* @param {array} opt_integralImage Empty array of size `width * height` to
* be filled with the integral image values. If not specified, computing
* the sum values will be skipped.
* @param {array} opt_integralImageSquare Empty array of size `width *
* height` to be filled with the integral image squared values. If not
* specified, computing the squared values will be skipped.
* @param {array} opt_tiltedIntegralImage Empty array of size `width *
* height` to be filled with the rotated integral image values. If not
* specified, computing the tilted sum values will be skipped.
* @param {array} opt_integralImageSobel Empty array of size `width *
* height` to be filled with the integral image of sobel values. If not
* specified, the sobel filtering will be skipped.
* @static
*/
tracking.Image.computeIntegralImage = function(pixels, width, height, opt_integralImage, opt_integralImageSquare, opt_tiltedIntegralImage, opt_integralImageSobel) {
if (arguments.length < 4) {
throw new Error('You should specify at least one output array in the order: sum, square, tilted, sobel.');
}
var pixelsSobel;
if (opt_integralImageSobel) {
pixelsSobel = tracking.Image.sobel(pixels, width, height);
}
for (var i = 0; i < height; i++) {
for (var j = 0; j < width; j++) {
var w = i * width * 4 + j * 4;
var pixel = ~~(pixels[w] * 0.299 + pixels[w + 1] * 0.587 + pixels[w + 2] * 0.114);
if (opt_integralImage) {
this.computePixelValueSAT_(opt_integralImage, width, i, j, pixel);
}
if (opt_integralImageSquare) {
this.computePixelValueSAT_(opt_integralImageSquare, width, i, j, pixel * pixel);
}
if (opt_tiltedIntegralImage) {
var w1 = w - width * 4;
var pixelAbove = ~~(pixels[w1] * 0.299 + pixels[w1 + 1] * 0.587 + pixels[w1 + 2] * 0.114);
this.computePixelValueRSAT_(opt_tiltedIntegralImage, width, i, j, pixel, pixelAbove || 0);
}
if (opt_integralImageSobel) {
this.computePixelValueSAT_(opt_integralImageSobel, width, i, j, pixelsSobel[w]);
}
}
}
};
/**
* Helper method to compute the rotated summed area table (RSAT) by the
* formula:
*
* RSAT(x, y) = RSAT(x-1, y-1) + RSAT(x+1, y-1) - RSAT(x, y-2) + I(x, y) + I(x, y-1)
*
* @param {array} RSAT Empty array of size `width * height` to be filled
* with the rotated integral image values.
* @param {number} width The image width.
* @param {number} i Vertical position of the pixel to be evaluated.
* @param {number} j Horizontal position of the pixel to be evaluated.
* @param {number} pixel Pixel value to be added to the integral image.
* @param {number} pixelAbove Value of the pixel in the row above.
* @static
* @private
*/
tracking.Image.computePixelValueRSAT_ = function(RSAT, width, i, j, pixel, pixelAbove) {
var w = i * width + j;
RSAT[w] = (RSAT[w - width - 1] || 0) + (RSAT[w - width + 1] || 0) - (RSAT[w - width - width] || 0) + pixel + pixelAbove;
};
/**
* Helper method to compute the summed area table (SAT) by the formula:
*
* SAT(x, y) = SAT(x, y-1) + SAT(x-1, y) + I(x, y) - SAT(x-1, y-1)
*
* @param {array} SAT Empty array of size `width * height` to be filled
* with the integral image values.
* @param {number} width The image width.
* @param {number} i Vertical position of the pixel to be evaluated.
* @param {number} j Horizontal position of the pixel to be evaluated.
* @param {number} pixel Pixel value to be added to the integral image.
* @static
* @private
*/
tracking.Image.computePixelValueSAT_ = function(SAT, width, i, j, pixel) {
var w = i * width + j;
SAT[w] = (SAT[w - width] || 0) + (SAT[w - 1] || 0) + pixel - (SAT[w - width - 1] || 0);
};
/**
* Converts a color from a color-space based on an RGB color model to a
* grayscale representation of its luminance. The coefficients represent the
* measured intensity perception of typical trichromat humans, in
* particular, human vision is most sensitive to green and least sensitive
* to blue.
* @param {Uint8Array|Uint8ClampedArray|Array} pixels The pixels in a linear [r,g,b,a,...] array.
* @param {number} width The image width.
* @param {number} height The image height.
* @param {boolean} fillRGBA If the result should fill all RGBA values with the gray scale
* values, instead of returning a single value per pixel.
* @return {Uint8Array} The grayscale pixels in a linear array ([p,p,p,a,...] if fillRGBA
* is true and [p1, p2, p3, ...] if fillRGBA is false).
* @static
*/
tracking.Image.grayscale = function(pixels, width, height, fillRGBA) {
/*
Performance result (rough EST. - image size, CPU arch. will affect):
https://jsperf.com/tracking-new-image-to-grayscale
Firefox v.60b:
fillRGBA Gray only
Old 11 551 OPs/sec
New 3548 6487 OPs/sec
---------------------------------
322.5x 11.8x faster
Chrome v.67b:
fillRGBA Gray only
Old 291 489 OPs/sec
New 6975 6635 OPs/sec
---------------------------------
24.0x 13.6x faster
- Ken Nilsen / epistemex
*/
var len = pixels.length>>2;
var gray = fillRGBA ? new Uint32Array(len) : new Uint8Array(len);
var data32 = new Uint32Array(pixels.buffer || new Uint8Array(pixels).buffer);
var i = 0;
var c = 0;
var luma = 0;
// unrolled loops to not have to check fillRGBA each iteration
if (fillRGBA) {
while(i < len) {
// Entire pixel in little-endian order (ABGR)
c = data32[i];
// Using the more up-to-date REC/BT.709 approx. weights for luma instead: [0.2126, 0.7152, 0.0722].
// luma = ((c>>>16 & 0xff) * 0.2126 + (c>>>8 & 0xff) * 0.7152 + (c & 0xff) * 0.0722 + 0.5)|0;
// But I'm using scaled integers here for speed (x 0xffff). This can be improved more using 2^n
// close to the factors allowing for shift-ops (i.e. 4732 -> 4096 => .. (c&0xff) << 12 .. etc.)
// if "accuracy" is not important (luma is anyway an visual approx.):
luma = ((c>>>16&0xff) * 13933 + (c>>>8&0xff) * 46871 + (c&0xff) * 4732)>>>16;
gray[i++] = luma * 0x10101 | c & 0xff000000;
}
}
else {
while(i < len) {
c = data32[i];
luma = ((c>>>16&0xff) * 13933 + (c>>>8&0xff) * 46871 + (c&0xff) * 4732)>>>16;
// ideally, alpha should affect value here: value * (alpha/255) or with shift-ops for the above version
gray[i++] = luma;
}
}
// Consolidate array view to byte component format independent of source view
return new Uint8Array(gray.buffer);
};
/**
* Fast horizontal separable convolution. A point spread function (PSF) is
* said to be separable if it can be broken into two one-dimensional
* signals: a vertical and a horizontal projection. The convolution is
* performed by sliding the kernel over the image, generally starting at the
* top left corner, so as to move the kernel through all the positions where
* the kernel fits entirely within the boundaries of the image. Adapted from
* https://github.com/kig/canvasfilters.
* @param {pixels} pixels The pixels in a linear [r,g,b,a,...] array.
* @param {number} width The image width.
* @param {number} height The image height.
* @param {array} weightsVector The weighting vector, e.g [-1,0,1].
* @param {number} opaque
* @return {array} The convoluted pixels in a linear [r,g,b,a,...] array.
*/
tracking.Image.horizontalConvolve = function(pixels, width, height, weightsVector, opaque) {
var side = weightsVector.length;
var halfSide = Math.floor(side / 2);
var output = new Float32Array(width * height * 4);
var alphaFac = opaque ? 1 : 0;
for (var y = 0; y < height; y++) {
for (var x = 0; x < width; x++) {
var sy = y;
var sx = x;
var offset = (y * width + x) * 4;
var r = 0;
var g = 0;
var b = 0;
var a = 0;
for (var cx = 0; cx < side; cx++) {
var scy = sy;
var scx = Math.min(width - 1, Math.max(0, sx + cx - halfSide));
var poffset = (scy * width + scx) * 4;
var wt = weightsVector[cx];
r += pixels[poffset] * wt;
g += pixels[poffset + 1] * wt;
b += pixels[poffset + 2] * wt;
a += pixels[poffset + 3] * wt;
}
output[offset] = r;
output[offset + 1] = g;
output[offset + 2] = b;
output[offset + 3] = a + alphaFac * (255 - a);
}
}
return output;
};
/**
* Fast vertical separable convolution. A point spread function (PSF) is
* said to be separable if it can be broken into two one-dimensional
* signals: a vertical and a horizontal projection. The convolution is
* performed by sliding the kernel over the image, generally starting at the
* top left corner, so as to move the kernel through all the positions where
* the kernel fits entirely within the boundaries of the image. Adapted from
* https://github.com/kig/canvasfilters.
* @param {pixels} pixels The pixels in a linear [r,g,b,a,...] array.
* @param {number} width The image width.
* @param {number} height The image height.
* @param {array} weightsVector The weighting vector, e.g [-1,0,1].
* @param {number} opaque
* @return {array} The convoluted pixels in a linear [r,g,b,a,...] array.
*/
tracking.Image.verticalConvolve = function(pixels, width, height, weightsVector, opaque) {
var side = weightsVector.length;
var halfSide = Math.floor(side / 2);
var output = new Float32Array(width * height * 4);
var alphaFac = opaque ? 1 : 0;
for (var y = 0; y < height; y++) {
for (var x = 0; x < width; x++) {
var sy = y;
var sx = x;
var offset = (y * width + x) * 4;
var r = 0;
var g = 0;
var b = 0;
var a = 0;
for (var cy = 0; cy < side; cy++) {
var scy = Math.min(height - 1, Math.max(0, sy + cy - halfSide));
var scx = sx;
var poffset = (scy * width + scx) * 4;
var wt = weightsVector[cy];
r += pixels[poffset] * wt;
g += pixels[poffset + 1] * wt;
b += pixels[poffset + 2] * wt;
a += pixels[poffset + 3] * wt;
}
output[offset] = r;
output[offset + 1] = g;
output[offset + 2] = b;
output[offset + 3] = a + alphaFac * (255 - a);
}
}
return output;
};
/**
* Fast separable convolution. A point spread function (PSF) is said to be
* separable if it can be broken into two one-dimensional signals: a
* vertical and a horizontal projection. The convolution is performed by
* sliding the kernel over the image, generally starting at the top left
* corner, so as to move the kernel through all the positions where the
* kernel fits entirely within the boundaries of the image. Adapted from
* https://github.com/kig/canvasfilters.
* @param {pixels} pixels The pixels in a linear [r,g,b,a,...] array.
* @param {number} width The image width.
* @param {number} height The image height.
* @param {array} horizWeights The horizontal weighting vector, e.g [-1,0,1].
* @param {array} vertWeights The vertical vector, e.g [-1,0,1].
* @param {number} opaque
* @return {array} The convoluted pixels in a linear [r,g,b,a,...] array.
*/
tracking.Image.separableConvolve = function(pixels, width, height, horizWeights, vertWeights, opaque) {
var vertical = this.verticalConvolve(pixels, width, height, vertWeights, opaque);
return this.horizontalConvolve(vertical, width, height, horizWeights, opaque);
};
/**
* Compute image edges using Sobel operator. Computes the vertical and
* horizontal gradients of the image and combines the computed images to
* find edges in the image. The way we implement the Sobel filter here is by
* first grayscaling the image, then taking the horizontal and vertical
* gradients and finally combining the gradient images to make up the final
* image. Adapted from https://github.com/kig/canvasfilters.
* @param {pixels} pixels The pixels in a linear [r,g,b,a,...] array.
* @param {number} width The image width.
* @param {number} height The image height.
* @return {array} The edge pixels in a linear [r,g,b,a,...] array.
*/
tracking.Image.sobel = function(pixels, width, height) {
pixels = this.grayscale(pixels, width, height, true);
var output = new Float32Array(width * height * 4);
var sobelSignVector = new Float32Array([-1, 0, 1]);
var sobelScaleVector = new Float32Array([1, 2, 1]);
var vertical = this.separableConvolve(pixels, width, height, sobelSignVector, sobelScaleVector);
var horizontal = this.separableConvolve(pixels, width, height, sobelScaleVector, sobelSignVector);
for (var i = 0; i < output.length; i += 4) {
var v = vertical[i];
var h = horizontal[i];
var p = Math.sqrt(h * h + v * v);
output[i] = p;
output[i + 1] = p;
output[i + 2] = p;
output[i + 3] = 255;
}
return output;
};
/**
* Equalizes the histogram of a grayscale image, normalizing the
* brightness and increasing the contrast of the image.
* @param {pixels} pixels The grayscale pixels in a linear array.
* @param {number} width The image width.
* @param {number} height The image height.
* @return {array} The equalized grayscale pixels in a linear array.
*/
tracking.Image.equalizeHist = function(pixels, width, height){
var equalized = new Uint8ClampedArray(pixels.length);
var histogram = new Array(256);
for(var i=0; i < 256; i++) histogram[i] = 0;
for(var i=0; i < pixels.length; i++){
equalized[i] = pixels[i];
histogram[pixels[i]]++;
}
var prev = histogram[0];
for(var i=0; i < 256; i++){
histogram[i] += prev;
prev = histogram[i];
}
var norm = 255 / pixels.length;
for(var i=0; i < pixels.length; i++)
equalized[i] = (histogram[pixels[i]] * norm + 0.5) | 0;
return equalized;
};
}());
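
A quick sanity check for the grayscale conversion above: the three integer
luma weights sum to exactly 65536, so a neutral gray maps to itself.

// One opaque mid-gray pixel, RGBA = [128, 128, 128, 255].
var pixels = new Uint8ClampedArray([128, 128, 128, 255]);
tracking.Image.grayscale(pixels, 1, 1, false)[0];  // 128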

View File

@ -0,0 +1,14 @@
var Benchmark = require('./utils/benchmark.js');
module.exports = {
setUp: function(done) {
Benchmark.setUpAll(done);
},
testBenchmark: function(test) {
Benchmark.runAll(function(results) {
test.ok(results.passed, Benchmark.createFailureMessage(results.resultDetails));
test.done();
});
}
};

View File

@ -0,0 +1,76 @@
'use strict';
var tracking = require('./utils/sandbox.js');
module.exports = {
setUp: function(done) {
done();
},
tearDown: function(done) {
done();
},
// TODO: Update this test to generate randomWindowOffsets_ and randomImageOffsets_ instead.
testGetDescriptors: function(test) {
var descriptors;
var descriptorsPerKeypoint = tracking.Brief.N / 32;
var grayScale = [
0, 0, 1, 0, 0, 0,
1, 9, 0, 9, 1, 0,
0, 1, 1, 1, 0, 0
];
var repeat = [-7, 7, -6, 6, -5, 5, -1, 1];
var width = 6;
// Write the offsets manually, as we can't verify results that are obtained randomly.
tracking.Brief.randomImageOffsets_[width] = [];
for (var i = 0; i < tracking.Brief.N; i++) {
var position = i % 4;
tracking.Brief.randomImageOffsets_[width].push(repeat[position * 2], repeat[position * 2 + 1]);
}
descriptors = tracking.Brief.getDescriptors(grayScale, width, [1, 1, 3, 1]);
test.equal(2 * descriptorsPerKeypoint, descriptors.length, 'There should be 8 descriptor words');
for (var j = 0; j < descriptorsPerKeypoint; j++) {
test.equal(858993459, descriptors[j], 'Descriptor should be 858993459');
}
for (var k = descriptorsPerKeypoint; k < 2 * descriptorsPerKeypoint; k++) {
test.equal(-286331154, descriptors[k], 'Descriptor should be -286331154');
}
test.done();
},
testGetMatchings: function(test) {
var descriptors1;
var descriptors2;
var grayScale1 = [
0, 0, 1, 0, 0, 0,
1, 9, 0, 9, 1, 0,
0, 1, 1, 1, 0, 0
];
var grayScale2 = [
0, 0, 0, 1, 0, 0,
0, 1, 9, 0, 9, 1,
0, 0, 1, 1, 1, 0
];
var keypoints1 = [1, 1, 3, 1];
var keypoints2 = [4, 1, 2, 1];
var matchings;
var width = 6;
descriptors1 = tracking.Brief.getDescriptors(grayScale1, width, keypoints1);
descriptors2 = tracking.Brief.getDescriptors(grayScale2, width, keypoints2);
matchings = tracking.Brief.match(keypoints1, descriptors1, keypoints2, descriptors2);
test.equal(2, matchings.length, 'There should be 2 matchings');
test.equal(1, matchings[0].index2, 'Keypoint 0 from 1st array should match keypoint 1 from the 2nd');
test.equal(0, matchings[1].index2, 'Keypoint 1 from 1st array should match keypoint 0 from the 2nd');
test.done();
}
};

View File

@ -0,0 +1,194 @@
'use strict';
var tracking = require('./utils/sandbox.js');
module.exports = {
setUp: function(done) {
done();
},
tearDown: function(done) {
done();
},
testConstructorEmpty: function(test) {
var colors;
var tracker;
test.doesNotThrow(function() {
tracker = new tracking.ColorTracker();
});
colors = tracker.getColors();
test.equal(1, colors.length, 'Colors array should have a single value');
test.equal('magenta', colors[0], 'Default color is magenta');
test.done();
},
testConstructorString: function(test) {
var colors;
var tracker;
test.doesNotThrow(function() {
tracker = new tracking.ColorTracker('yellow');
});
colors = tracker.getColors();
test.equal(1, colors.length, 'Colors array should have a single value');
test.equal('yellow', colors[0], 'The colors array should be set to value in the constructor');
test.throws(function() {
tracker = new tracking.ColorTracker('notvalid');
});
test.done();
},
testConstructorArray: function(test) {
var colors;
var tracker;
test.doesNotThrow(function() {
tracker = new tracking.ColorTracker([]);
});
colors = tracker.getColors();
test.equal(0, colors.length, 'Colors array should be empty');
test.doesNotThrow(function() {
tracker = new tracking.ColorTracker(['magenta', 'cyan', 'yellow']);
});
colors = tracker.getColors();
test.equal(3, colors.length, 'Colors array should have 3 values');
test.equal('magenta', colors[0], 'The colors array should be set to values in the constructor');
test.equal('cyan', colors[1], 'The colors array should be set to values in the constructor');
test.equal('yellow', colors[2], 'The colors array should be set to values in the constructor');
test.throws(function() {
tracker = new tracking.ColorTracker(['magenta', null, 'yellow']);
});
test.done();
},
testFindColor: function(test) {
var colors;
var pixels;
var tracker;
tracking.ColorTracker.registerColor('black', function(r, g, b) {
return r === 0 && g === 0 && b === 0;
});
tracker = new tracking.ColorTracker('black');
colors = tracker.getColors();
test.equal(1, colors.length, 'Colors array should have a single value');
test.equal('black', colors[0], 'The colors array should be set to values in the constructor');
tracker.setMinDimension(2);
tracker.setMinGroupSize(6);
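// RGBA data for a 5x4 frame: each pixel spans four array entries, and
// [0, 0, 0, 0] pixels match the registered 'black' color.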
pixels = [
1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0,
1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1,
1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
];
tracker.on('track', function(event) {
test.equal(1, event.data.length, 'There should only be one result rectangle');
test.equal(1, event.data[0].x, 'The first rectangle should be at x = 1');
test.equal(0, event.data[0].y, 'The first rectangle should be at y = 0');
test.equal(2, event.data[0].width, 'The first rectangle\'s width should be 2');
test.equal(3, event.data[0].height, 'The first rectangle\'s height should be 3');
test.done();
});
tracker.track(pixels, 5, 4);
},
testMergedRectangles: function(test) {
var pixels;
var tracker;
tracking.ColorTracker.registerColor('black', function(r, g, b) {
return r === 0 && g === 0 && b === 0;
});
tracker = new tracking.ColorTracker('black');
tracker.setMinDimension(1);
tracker.setMinGroupSize(6);
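// RGBA data for a 6x11 frame; nearby black regions should be merged into
// the two expected result rectangles.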
pixels = [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0,
0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0,
0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0,
0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0,
0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0,
0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0,
0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0
];
tracker.on('track', function(event) {
test.equal(2, event.data.length, 'There should be 2 result rectangles');
test.equal(0, event.data[0].x, 'The first rectangle should be at x = 0');
test.equal(0, event.data[0].y, 'The first rectangle should be at y = 0');
test.equal(5, event.data[0].width, 'The first rectangle\'s width should be 5');
test.equal(6, event.data[0].height, 'The first rectangle\'s height should be 6');
test.equal(2, event.data[1].x, 'The second rectangle should be at x = 2');
test.equal(8, event.data[1].y, 'The second rectangle should be at y = 8');
test.equal(1, event.data[1].width, 'The second rectangle\'s width should be 1');
test.equal(2, event.data[1].height, 'The second rectangle\'s height should be 2');
test.done();
});
tracker.track(pixels, 6, 11);
},
testDimensionConstraints: function(test) {
var pixels;
var tracker;
tracking.ColorTracker.registerColor('black', function(r, g, b) {
return r === 0 && g === 0 && b === 0;
});
tracker = new tracking.ColorTracker('black');
tracker.setMinDimension(1);
tracker.setMaxDimension(2);
tracker.setMinGroupSize(6);
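// Same frame as testMergedRectangles, but setMaxDimension(2) filters out
// the larger merged rectangle, leaving only the 1x2 one.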
pixels = [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0,
0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0,
0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0,
0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0,
0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0,
0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0,
0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0
];
tracker.on('track', function(event) {
test.equal(1, event.data.length, 'There should be 1 result rectangle');
test.equal(1, event.data[0].width, 'The rectangle\'s width should be 1');
test.equal(2, event.data[0].height, 'The rectangle\'s height should be 2');
test.done();
});
tracker.track(pixels, 6, 11);
}
};

View File

@ -0,0 +1,69 @@
'use strict';
var tracking = require('./utils/sandbox.js');
module.exports = {
setUp: function(done) {
done();
},
tearDown: function(done) {
done();
},
testCornerDetection: function(test) {
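// The 16 values model the circle of pixels around a candidate of intensity
// 150; FAST declares a corner when a contiguous arc of that circle is
// uniformly brighter or darker than the center by more than the threshold.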
test.ok(
tracking.Fast.isCorner(
150,
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 255],
10
),
'A corner should have been detected'
);
test.equal(
false,
tracking.Fast.isCorner(
150,
[0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 255, 255, 255, 255, 255],
10
),
'No corners should have been detected'
);
test.done();
},
testFindCorners: function(test) {
var corners,
pixels = [];
for (var i = 0; i < 64; i++) {
if (i === 27 || i === 28) {
pixels.push(0);
}
else {
pixels.push(255);
}
}
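// findCorners returns a flat [x0, y0, x1, y1, ...] array, so a single
// corner yields two entries.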
corners = tracking.Fast.findCorners(pixels, 8, 8);
test.equal(
2,
corners.length,
'Should have found one corner (two coordinate values)'
);
test.equal(
3,
corners[0],
'Corner should be at x = 3'
);
test.equal(
3,
corners[1],
'Corner should be at y = 3'
);
test.done();
}
};

View File

@ -0,0 +1,77 @@
'use strict';
var tracking = require('./utils/sandbox.js');
module.exports = {
setUp: function(done) {
done();
},
tearDown: function(done) {
done();
},
testConstructorEmpty: function(test) {
test.doesNotThrow(
function() {
new tracking.ObjectTracker();
}
);
test.done();
},
testConstructorClassifier: function(test) {
test.doesNotThrow(
function() {
new tracking.ObjectTracker(tracking.ViolaJones.classifiers.face);
}
);
test.done();
},
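// String arguments must name a built-in Viola-Jones classifier such as
// 'face', 'eye' or 'mouth'.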
testConstructorString: function(test) {
test.doesNotThrow(
function() {
new tracking.ObjectTracker('face');
}
);
test.throws(
function() {
new tracking.ObjectTracker('notvalid');
}
);
test.done();
},
testConstructorArray: function(test) {
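// Arrays may mix classifier data objects and built-in classifier names;
// null or unknown entries should throw.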
test.doesNotThrow(
function() {
new tracking.ObjectTracker([]);
}
);
test.doesNotThrow(
function() {
new tracking.ObjectTracker([tracking.ViolaJones.classifiers.face]);
}
);
test.doesNotThrow(
function() {
new tracking.ObjectTracker(['face', 'mouth', 'eye']);
}
);
test.throws(
function() {
new tracking.ObjectTracker(['face', null, 'eye']);
}
);
test.done();
}
};

Binary file not shown.

After

Width:  |  Height:  |  Size: 40 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 40 KiB

Some files were not shown because too many files have changed in this diff.