Skip to content

Commit

Permalink
fixed "How to Use CompreFace" tutorial, added demos
Browse files Browse the repository at this point in the history
  • Loading branch information
pospielov committed May 23, 2021
1 parent f3f5c46 commit 069ef39
Show file tree
Hide file tree
Showing 3 changed files with 200 additions and 21 deletions.
79 changes: 58 additions & 21 deletions docs/How-to-Use-CompreFace.md
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
their access roles or create new [Face Services](Face-services-and-plugins.md).

**Step 5.** To recognize subjects among the known subjects, you need to create Face Recognition Service. After creating a new Face
Service, you will see it in the Services List with an appropriate name and API key.
Service, you will see it in the Services List with an appropriate name and API key. After this step, you can look at our [demos](#demos).

**Step 6.** To add known subjects to your Face Collection of Face Recognition Service, you can use REST API.
Once you’ve uploaded all known faces, you can test the collection using REST API or the TEST page.
Expand Down Expand Up @@ -38,7 +38,7 @@ JSON contains an array of objects that represent each recognized face. Each obje
"x_min": 319,
"y_min": 55
},
"faces": [
"subjects": [
{
"similarity": 0.99593,
"subject": "lisan"
Expand All @@ -52,38 +52,75 @@ JSON contains an array of objects that represent each recognized face. Each obje
}
```

## Demos

1. [tutorial_demo.html](./demos/tutorial_demo.html)

This demo shows the simplest example of Face Recognition Service usage.
To run the demo, just open the HTML file in a browser.
The API key for this demo was created in **step 5** of [How to Use CompreFace](#how-to-use-compreface).

2. [webcam_demo.html](./demos/webcam_demo.html)

This demo shows a simple webcam example for the Face Recognition Service.
To run the demo, just open the HTML file in a browser.
The API key for this demo was created in **step 5** of [How to Use CompreFace](#how-to-use-compreface).

## Code Snippets

Here is a JavaScript code snippet that uploads a new image to your Face Collection:

```js
async function saveNewImageToFaceCollection() {
let name = encodeURIComponent('John');
function saveNewImageToFaceCollection(elem) {
let subject = encodeURIComponent(document.getElementById("subject").value);
let apiKey = document.getElementById("apiKey").value;
let formData = new FormData();
let photo = document.getElementById("fileDropRef").files[0];
let photo = elem.files[0];

formData.append("photo", photo);
formData.append("file", photo);

try {
let r = await fetch('http://localhost:8000/api/v1/recognition/faces/?subject=`${name}`', {method: "POST", body: formData});
} catch (e) {
console.log('Houston, we have a problem...:', e);
}
fetch('http://localhost:8000/api/v1/recognition/faces/?subject=' + subject,
{
method: "POST",
headers: {
"x-api-key": apiKey
},
body: formData
}
).then(r => r.json()).then(
function (data) {
console.log('New example was saved', data);
})
.catch(function (error) {
alert('Request failed: ' + JSON.stringify(error));
});
}
```

This function sends the image to the server and shows the result on the page:

```js
function recognizeFace(input) {
function recognizeFace(elem) {
let apiKey = document.getElementById("apiKey").value;
let formData = new FormData();
let photo = elem.files[0];

async function getData() {
let response = await fetch('http://localhost:8000/api/v1/recognition/recognize')
let data = await response.json()
return data
}
formData.append("file", photo);

let result = Promise.resolve(response)
result.then(data => {
document.getElementById("result-textarea-request").innerHTML = JSON.stringify(data);
});
fetch('http://localhost:8000/api/v1/recognition/recognize',
{
method: "POST",
headers: {
"x-api-key": apiKey
},
body: formData
}
).then(r => r.json()).then(
function (data) {
document.getElementById("result").innerHTML = JSON.stringify(data);
})
.catch(function (error) {
alert('Request failed: ' + JSON.stringify(error));
});
}
```
70 changes: 70 additions & 0 deletions docs/demos/tutorial_demo.html
Original file line number Diff line number Diff line change
@@ -0,0 +1,70 @@
<!DOCTYPE html>
<html>
<head>
<script type="text/javascript">
function saveNewImageToFaceCollection(elem) {
let subject = encodeURIComponent(document.getElementById("subject").value);
let apiKey = document.getElementById("apiKey").value;
let formData = new FormData();
let photo = elem.files[0];

formData.append("file", photo);

fetch('http://localhost:8000/api/v1/recognition/faces/?subject=' + subject,
{
method: "POST",
headers: {
"x-api-key": apiKey
},
body: formData
}
).then(r => r.json()).then(
function (data) {
console.log('New example was saved', data);
})
.catch(function (error) {
alert('Request failed: ' + JSON.stringify(error));
});
}

function recognizeFace(elem) {
let apiKey = document.getElementById("apiKey").value;
let formData = new FormData();
let photo = elem.files[0];

formData.append("file", photo);

fetch('http://localhost:8000/api/v1/recognition/recognize',
{
method: "POST",
headers: {
"x-api-key": apiKey
},
body: formData
}
).then(r => r.json()).then(
function (data) {
document.getElementById("result").innerHTML = JSON.stringify(data);
})
.catch(function (error) {
alert('Request failed: ' + JSON.stringify(error));
});
}
</script>
<title>test</title>
</head>
<body>

<label for="apiKey">API key:</label><input id="apiKey" />
<div></div>
<label for="subject">Subject:</label><input id="subject" />
<div>Click to add photo:</div>
<input type=file id="newFace" onchange="saveNewImageToFaceCollection(this)" />
<div>Click to recognize photo</div>
<input type=file id="recognizeFace" onchange="recognizeFace(this)" />
<div>Result:</div>
<div id="result"></div>


</body>
</html>
72 changes: 72 additions & 0 deletions docs/demos/webcam_demo.html
Original file line number Diff line number Diff line change
@@ -0,0 +1,72 @@
<!DOCTYPE html>
<html>
<head>
<script type="text/javascript">
function video() {
let video = document.getElementById("live");
let canvas = document.getElementById("canvas");
let canvas2 = document.getElementById("canvas2");
let ctx = canvas.getContext('2d');
let ctx2 = canvas2.getContext('2d');
let apiKey = document.getElementById("apiKey").value;

navigator.mediaDevices.getUserMedia({
video: {width: 640, height: 480}
}).then(function (stream) {
video.srcObject = stream;

document.addEventListener("next_frame", draw);

const evt = new Event("next_frame", {"bubbles": true, "cancelable": false});
document.dispatchEvent(evt);
});

function draw() {
ctx.drawImage(video, 0, 0, 640, 480);
canvas.toBlob(function (blob) {
blob.name = "blob.jpeg"
let fd = new FormData();
fd.append('file', blob, "blob.jpeg");

fetch('http://localhost:8000/api/v1/recognition/recognize',
{
method: "POST",
headers: {
"x-api-key": apiKey
},
body: fd
}
).then(r => r.json()).then(
function (data) {
const evt = new Event("next_frame", {"bubbles": true, "cancelable": false});
document.dispatchEvent(evt);
ctx2.clearRect(0, 0, 640, 480);
ctx2.drawImage(video, 0, 0, 640, 480);
if (!data.result) {
return;
}
let box = data.result[0].box;
let name = data.result[0].subjects[0].subject;
ctx2.lineWidth = 3;
ctx2.strokeStyle = 'green';
ctx2.strokeRect(box.x_min, box.y_min, box.x_max - box.x_min, box.y_max - box.y_min);
ctx2.font = '24px serif';
ctx2.strokeText(name, box.x_min, box.y_min - 20);
});
}, 'image/jpeg', 0.95);
}

}


</script>
<title>test</title>
</head>
<body>
<label for="apiKey">API key:</label><input id="apiKey" />
<button onclick="video()">video</button>
<video id="live" width="640" height="480" autoplay style="display:none;"></video>
<canvas width="640" id="canvas" height="480" style="display:none;"></canvas>
<canvas width="640" id="canvas2" height="480"></canvas>
</body>
</html>

0 comments on commit 069ef39

Please sign in to comment.