Skip to content

Commit

Permalink
Update rest samples
Browse files Browse the repository at this point in the history
  • Loading branch information
Honry committed Sep 20, 2024
1 parent c826772 commit e4b9b75
Show file tree
Hide file tree
Showing 33 changed files with 540 additions and 246 deletions.
1 change: 1 addition & 0 deletions face_recognition/.eslintrc.js
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
// ESLint configuration: declare names that are provided by the runtime
// environment so the linter does not flag them as undefined globals.
module.exports = {
globals: {
// WebNN graph builder, provided by the browser's navigator.ml context.
'MLGraphBuilder': 'readonly',
// WebNN tensor-usage flags (WRITE/READ) passed when creating MLTensors.
'MLTensorUsage': 'readonly',
// Presumably the TensorFlow.js global — confirm against the sample's scripts.
'tf': 'readonly',
},
};
29 changes: 22 additions & 7 deletions face_recognition/facenet_nchw.js
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,8 @@ export class FaceNetNchw {
this.context_ = null;
this.builder_ = null;
this.graph_ = null;
this.inputTensor_ = null;
this.outputTensor_ = null;
this.weightsUrl_ = weightsOrigin() +
'/test-data/models/facenet_nchw/weights';
this.inputOptions = {
Expand All @@ -25,6 +27,7 @@ export class FaceNetNchw {
distanceMetric: 'euclidean',
threshold: 1.26,
};
this.outputShape_ = [1, 512];
}

async buildConv_(
Expand Down Expand Up @@ -138,10 +141,19 @@ export class FaceNetNchw {
async load(contextOptions) {
this.context_ = await navigator.ml.createContext(contextOptions);
this.builder_ = new MLGraphBuilder(this.context_);
const input = this.builder_.input('input', {
const inputDesc = {
dataType: 'float32',

Check failure on line 145 in face_recognition/facenet_nchw.js

View workflow job for this annotation

GitHub Actions / job (ubuntu-latest)

Expected indentation of 6 spaces but found 8
dimensions: this.inputOptions.inputShape,

Check failure on line 146 in face_recognition/facenet_nchw.js

View workflow job for this annotation

GitHub Actions / job (ubuntu-latest)

Expected indentation of 6 spaces but found 8
shape: this.inputOptions.inputShape,

Check failure on line 147 in face_recognition/facenet_nchw.js

View workflow job for this annotation

GitHub Actions / job (ubuntu-latest)

Expected indentation of 6 spaces but found 8
};
const input = this.builder_.input('input', inputDesc);
inputDesc.usage = MLTensorUsage.WRITE;
this.inputTensor_ = await this.context_.createTensor(inputDesc);
this.outputTensor_ = await this.context_.createTensor({
dataType: 'float32',
dimensions: this.inputOptions.inputShape,
shape: this.inputOptions.inputShape,
dimensions: this.outputShape_,
shape: this.outputShape_,
usage: MLTensorUsage.READ,
});

const poolOptions = {windowDimensions: [3, 3], strides};
Expand Down Expand Up @@ -272,9 +284,12 @@ export class FaceNetNchw {
}
}

async compute(inputBuffer, outputs) {
const inputs = {'input': inputBuffer};
const results = await this.context_.compute(this.graph_, inputs, outputs);
return results;
async compute(inputBuffer) {
this.context_.writeTensor(this.inputTensor_, inputBuffer);
const inputs = {'input': this.inputTensor_};
const outputs = {'output': this.outputTensor_};
this.context_.dispatch(this.graph_, inputs, outputs);
const results = await this.context_.readTensor(this.outputTensor_);
return new Float32Array(results);
}
}
25 changes: 20 additions & 5 deletions face_recognition/facenet_nhwc.js
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,8 @@ export class FaceNetNhwc {
this.context_ = null;
this.builder_ = null;
this.graph_ = null;
this.inputTensor_ = null;
this.outputTensor_ = null;
this.weightsUrl_ = weightsOrigin() +
'/test-data/models/facenet_nhwc/weights';
this.inputOptions = {
Expand All @@ -25,6 +27,7 @@ export class FaceNetNhwc {
distanceMetric: 'euclidean',
threshold: 1.26,
};
this.outputShape_ = [1, 512];
}

async buildConv_(input, namePrefix, options = undefined, relu = true) {
Expand Down Expand Up @@ -139,10 +142,19 @@ export class FaceNetNhwc {
async load(contextOptions) {
this.context_ = await navigator.ml.createContext(contextOptions);
this.builder_ = new MLGraphBuilder(this.context_);
const input = this.builder_.input('input', {
const inputDesc = {
dataType: 'float32',
dimensions: this.inputOptions.inputShape,
shape: this.inputOptions.inputShape,
};
const input = this.builder_.input('input', inputDesc);
inputDesc.usage = MLTensorUsage.WRITE;
this.inputTensor_ = await this.context_.createTensor(inputDesc);
this.outputTensor_ = await this.context_.createTensor({
dataType: 'float32',
dimensions: this.outputShape_,
shape: this.outputShape_,
usage: MLTensorUsage.READ,
});

const poolOptions = {windowDimensions: [3, 3], strides, layout: 'nhwc'};
Expand Down Expand Up @@ -241,9 +253,12 @@ export class FaceNetNhwc {
}
}

async compute(inputBuffer, outputs) {
const inputs = {'input': inputBuffer};
const results = await this.context_.compute(this.graph_, inputs, outputs);
return results;
async compute(inputBuffer) {
this.context_.writeTensor(this.inputTensor_, inputBuffer);
const inputs = {'input': this.inputTensor_};
const outputs = {'output': this.outputTensor_};
this.context_.dispatch(this.graph_, inputs, outputs);
const results = await this.context_.readTensor(this.outputTensor_);
return new Float32Array(results);
}
}
28 changes: 8 additions & 20 deletions face_recognition/main.js
Original file line number Diff line number Diff line change
Expand Up @@ -32,8 +32,6 @@ let stream = null;
let loadTime = 0;
let buildTime = 0;
let computeTime = 0;
let fdOutputs;
let frOutputs;
let deviceType = '';
let lastdeviceType = '';
let backend = '';
Expand Down Expand Up @@ -180,15 +178,14 @@ async function getEmbeddings(inputElem) {
const fdInputBuffer = utils.getInputTensor(inputElem, fdInputOptions);
let totalComputeTime = 0;
let start = performance.now();
const results = await fdInstance.compute(fdInputBuffer, fdOutputs);
const results = await fdInstance.compute(fdInputBuffer);
totalComputeTime = performance.now() - start;
fdOutputs = results.outputs;
const strokedRects = [];
const embeddings = [];
const height = inputElem.naturalHeight || inputElem.height;
const width = inputElem.naturalWidth || inputElem.width;
const fdOutputArrary = [];
for (const output of Object.entries(fdOutputs)) {
for (const output of Object.entries(results)) {
fdOutputArrary.push(output[1]);
}
const fdSsdOutputs = SsdDecoder.processSsdOutputTensor(
Expand Down Expand Up @@ -222,10 +219,9 @@ async function getEmbeddings(inputElem) {
frInputOptions.drawOptions = drawOptions;
const frInputBuffer = utils.getInputTensor(inputElem, frInputOptions);
start = performance.now();
const results = await frInstance.compute(frInputBuffer, frOutputs);
const results = await frInstance.compute(frInputBuffer);
totalComputeTime += performance.now() - start;
frOutputs = results.outputs;
const [...normEmbedding] = Float32Array.from(frOutputs.output);
const [...normEmbedding] = Float32Array.from(results);
embeddings.push(normEmbedding);
}
return {computeTime: totalComputeTime, strokedRects, embeddings};
Expand Down Expand Up @@ -330,12 +326,6 @@ async function main() {
frInstance = constructNetObject(frInstanceType);
fdInputOptions = fdInstance.inputOptions;
frInputOptions = frInstance.inputOptions;
fdOutputs = {};
for (const outputInfo of Object.entries(fdInstance.outputsInfo)) {
fdOutputs[outputInfo[0]] =
new Float32Array(utils.sizeOfShape(outputInfo[1]));
}
frOutputs = {'output': new Float32Array(utils.sizeOfShape([1, 512]))};
isFirstTimeLoad = false;
console.log(`- Model name: ${fdModelName}, Model layout: ${layout} -`);
// UI shows model loading progress
Expand Down Expand Up @@ -374,12 +364,10 @@ async function main() {
let medianComputeTime;
console.log('- Computing... ');
// Do warm up
const fdResults = await fdInstance.compute(new Float32Array(
utils.sizeOfShape(fdInputOptions.inputShape)), fdOutputs);
const frResults = await frInstance.compute(new Float32Array(
utils.sizeOfShape(frInputOptions.inputShape)), frOutputs);
fdOutputs = fdResults.outputs;
frOutputs = frResults.outputs;
await fdInstance.compute(new Float32Array(
utils.sizeOfShape(fdInputOptions.inputShape)));
await frInstance.compute(new Float32Array(
utils.sizeOfShape(frInputOptions.inputShape)));
for (let i = 0; i < numRuns; i++) {
if (numRuns > 1) {
// clear all predicted embeddings for benckmarking
Expand Down
1 change: 1 addition & 0 deletions facial_landmark_detection/.eslintrc.js
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
// ESLint configuration: declare names that are provided by the runtime
// environment so the linter does not flag them as undefined globals.
module.exports = {
globals: {
// WebNN graph builder, provided by the browser's navigator.ml context.
'MLGraphBuilder': 'readonly',
// WebNN tensor-usage flags (WRITE/READ) passed when creating MLTensors.
'MLTensorUsage': 'readonly',
// Presumably the TensorFlow.js global — confirm against the sample's scripts.
'tf': 'readonly',
},
};
25 changes: 20 additions & 5 deletions facial_landmark_detection/face_landmark_nchw.js
Original file line number Diff line number Diff line change
Expand Up @@ -8,12 +8,15 @@ export class FaceLandmarkNchw {
this.context_ = null;
this.builder_ = null;
this.graph_ = null;
this.inputTensor_ = null;
this.outputTensor_ = null;
this.weightsUrl_ = weightsOrigin() +
'/test-data/models/face_landmark_nchw/weights';
this.inputOptions = {
inputLayout: 'nchw',
inputShape: [1, 3, 128, 128],
};
this.outputShape_ = [1, 136];
}

async buildMaxPool2d(input, options) {
Expand Down Expand Up @@ -69,10 +72,19 @@ export class FaceLandmarkNchw {
async load(contextOptions) {
this.context_ = await navigator.ml.createContext(contextOptions);
this.builder_ = new MLGraphBuilder(this.context_);
const input = this.builder_.input('input', {
const inputDesc = {
dataType: 'float32',
dimensions: this.inputOptions.inputShape,
shape: this.inputOptions.inputShape,
};
const input = this.builder_.input('input', inputDesc);
inputDesc.usage = MLTensorUsage.WRITE;
this.inputTensor_ = await this.context_.createTensor(inputDesc);
this.outputTensor_ = await this.context_.createTensor({
dataType: 'float32',
dimensions: this.outputShape_,
shape: this.outputShape_,
usage: MLTensorUsage.READ,
});

const poolOptions =
Expand Down Expand Up @@ -112,9 +124,12 @@ export class FaceLandmarkNchw {
}
}

async compute(inputBuffer, outputs) {
const inputs = {'input': inputBuffer};
const results = await this.context_.compute(this.graph_, inputs, outputs);
return results;
async compute(inputBuffer) {
this.context_.writeTensor(this.inputTensor_, inputBuffer);
const inputs = {'input': this.inputTensor_};
const outputs = {'output': this.outputTensor_};
this.context_.dispatch(this.graph_, inputs, outputs);
const results = await this.context_.readTensor(this.outputTensor_);
return new Float32Array(results);
}
}
25 changes: 20 additions & 5 deletions facial_landmark_detection/face_landmark_nhwc.js
Original file line number Diff line number Diff line change
Expand Up @@ -8,12 +8,15 @@ export class FaceLandmarkNhwc {
this.context_ = null;
this.builder_ = null;
this.graph_ = null;
this.inputTensor_ = null;
this.outputTensor_ = null;
this.weightsUrl_ = weightsOrigin() +
'/test-data/models/face_landmark_nhwc/weights';
this.inputOptions = {
inputLayout: 'nhwc',
inputShape: [1, 128, 128, 3],
};
this.outputShape_ = [1, 136];
}

async buildMaxPool2d(input, options) {
Expand Down Expand Up @@ -70,10 +73,19 @@ export class FaceLandmarkNhwc {
async load(contextOptions) {
this.context_ = await navigator.ml.createContext(contextOptions);
this.builder_ = new MLGraphBuilder(this.context_);
const input = this.builder_.input('input', {
const inputDesc = {
dataType: 'float32',
dimensions: this.inputOptions.inputShape,
shape: this.inputOptions.inputShape,
};
const input = this.builder_.input('input', inputDesc);
inputDesc.usage = MLTensorUsage.WRITE;
this.inputTensor_ = await this.context_.createTensor(inputDesc);
this.outputTensor_ = await this.context_.createTensor({
dataType: 'float32',
dimensions: this.outputShape_,
shape: this.outputShape_,
usage: MLTensorUsage.READ,
});

const poolOptions =
Expand Down Expand Up @@ -115,9 +127,12 @@ export class FaceLandmarkNhwc {
}
}

async compute(inputBuffer, outputs) {
const inputs = {'input': inputBuffer};
const results = await this.context_.compute(this.graph_, inputs, outputs);
return results;
async compute(inputBuffer) {
this.context_.writeTensor(this.inputTensor_, inputBuffer);
const inputs = {'input': this.inputTensor_};
const outputs = {'output': this.outputTensor_};
this.context_.dispatch(this.graph_, inputs, outputs);
const results = await this.context_.readTensor(this.outputTensor_);
return new Float32Array(results);
}
}
28 changes: 8 additions & 20 deletions facial_landmark_detection/main.js
Original file line number Diff line number Diff line change
Expand Up @@ -28,8 +28,6 @@ let stream = null;
let loadTime = 0;
let buildTime = 0;
let computeTime = 0;
let fdOutputs;
let fldOutputs;
let deviceType = '';
let lastdeviceType = '';
let backend = '';
Expand Down Expand Up @@ -149,15 +147,14 @@ async function predict(inputElement) {
const fdInputBuffer = utils.getInputTensor(inputElement, fdInputOptions);
let totalComputeTime = 0;
let start = performance.now();
const results = await fdInstance.compute(fdInputBuffer, fdOutputs);
const results = await fdInstance.compute(fdInputBuffer);
totalComputeTime += performance.now() - start;
fdOutputs = results.outputs;
const strokedRects = [];
const keyPoints = [];
const height = inputElement.naturalHeight || inputElement.height;
const width = inputElement.naturalWidth || inputElement.width;
const fdOutputArrary = [];
for (const output of Object.entries(fdOutputs)) {
for (const output of Object.entries(results)) {
fdOutputArrary.push(output[1]);
}
const fdSsdOutputs = SsdDecoder.processSsdOutputTensor(
Expand Down Expand Up @@ -191,10 +188,9 @@ async function predict(inputElement) {
fldInputOptions.drawOptions = drawOptions;
const fldInputBuffer = utils.getInputTensor(inputElement, fldInputOptions);
start = performance.now();
const results = await fldInstance.compute(fldInputBuffer, fldOutputs);
const results = await fldInstance.compute(fldInputBuffer);
totalComputeTime += performance.now() - start;
fldOutputs = results.outputs;
keyPoints.push(fldOutputs.output.slice());
keyPoints.push(results.slice());
}
return [totalComputeTime.toFixed(2), strokedRects, keyPoints];
}
Expand Down Expand Up @@ -266,12 +262,6 @@ async function main() {
fldInstance = constructNetObject(fldInstanceType);
fdInputOptions = fdInstance.inputOptions;
fldInputOptions = fldInstance.inputOptions;
fdOutputs = {};
for (const outputInfo of Object.entries(fdInstance.outputsInfo)) {
fdOutputs[outputInfo[0]] =
new Float32Array(utils.sizeOfShape(outputInfo[1]));
}
fldOutputs = {'output': new Float32Array(utils.sizeOfShape([1, 136]))};
isFirstTimeLoad = false;
console.log(`- Model name: ${fdModelName}, Model layout: ${layout} -`);
// UI shows model loading progress
Expand Down Expand Up @@ -312,11 +302,9 @@ async function main() {
console.log('- Computing... ');
// Do warm up
const fdResults = await fdInstance.compute(new Float32Array(
utils.sizeOfShape(fdInputOptions.inputShape)), fdOutputs);
utils.sizeOfShape(fdInputOptions.inputShape)));
const fldResults = await fldInstance.compute(new Float32Array(
utils.sizeOfShape(fldInputOptions.inputShape)), fldOutputs);
fdOutputs = fdResults.outputs;
fldOutputs = fldResults.outputs;
utils.sizeOfShape(fldInputOptions.inputShape)));
for (let i = 0; i < numRuns; i++) {
[computeTime, strokedRects, keyPoints] = await predict(imgElement);
console.log(` compute time ${i+1}: ${computeTime} ms`);
Expand All @@ -327,8 +315,8 @@ async function main() {
medianComputeTime = medianComputeTime.toFixed(2);
console.log(` median compute time: ${medianComputeTime} ms`);
}
console.log('Face Detection model outputs: ', fdOutputs);
console.log('Face Landmark model outputs: ', fldOutputs);
console.log('Face Detection model outputs: ', fdResults);
console.log('Face Landmark model outputs: ', fldResults);
await ui.showProgressComponent('done', 'done', 'done');
$('#fps').hide();
ui.readyShowResultComponents();
Expand Down
Loading

0 comments on commit e4b9b75

Please sign in to comment.