From 3f772ae1d9454315d1a667750b0084c57245cf71 Mon Sep 17 00:00:00 2001 From: Linchenn Date: Sat, 6 Aug 2022 16:56:54 -0700 Subject: [PATCH 1/8] mb23 --- e2e/benchmarks/local-benchmark/README.md | 28 +++++---- e2e/benchmarks/local-benchmark/index.html | 71 +++++++++++++++++++---- e2e/benchmarks/model_config.js | 42 +++++++++++--- 3 files changed, 109 insertions(+), 32 deletions(-) diff --git a/e2e/benchmarks/local-benchmark/README.md b/e2e/benchmarks/local-benchmark/README.md index b780a0777b2..7898dfb4273 100644 --- a/e2e/benchmarks/local-benchmark/README.md +++ b/e2e/benchmarks/local-benchmark/README.md @@ -67,14 +67,20 @@ the benchmark. # Benchmark test It's easy to set up a web server to host benchmarks and run against them via e2e/benchmarks/local-benchmark/index.html. You can manually specify the optional url parameters as needed. Here are the list of supported url parameters: -architecture: same as architecture
-backend: same as backend
-benchmark: same as models
-inputSize: same as inputSizes
-inputType: same as inputTypes
-localBuild: local build name list, separated by comma. The name is in short form (in general the name without the tfjs- and backend- prefixes, for example webgl for tfjs-backend-webgl, core for tfjs-core). Example: 'webgl,core'.
-run: same as numRuns
-task: correctness to "Test correctness" or performance to "Run benchmark"
-warmup: same as numWarmups
-modelUrl: same as modelUrl, for custom models only
-${InputeName}Shape: the input shape array, separated by comma, for custom models only. For example, bodypix's [graph model](https://storage.googleapis.com/tfjs-models/savedmodel/bodypix/mobilenet/float/075/model-stride16.json) has an input named sub_2, then users could add '`sub_2Shape=1,1,1,3`' in the URL to populate its shape.
+* Model related parameters: + + architecture: same as architecture
+ alpha: same as alpha
+ benchmark: same as models
+ inputSize: same as inputSizes
+ inputType: same as inputTypes
+ modelUrl: same as modelUrl, for custom models only
+ ${InputeName}Shape: the input shape array, separated by comma, for custom models only. For example, bodypix's [graph model](https://storage. googleapis.com/tfjs-models/savedmodel/bodypix/mobilenet/float/075/ model-stride16.json) has an input named sub_2, then users could add '`sub_2Shape=1,1,1,3`' in the URL to populate its shape.
+ +* Environment related parameters: + + backend: same as backend
+ localBuild: local build name list, separated by comma. The name is in short form (in general the name without the tfjs- and backend- prefixes, for example webgl for tfjs-backend-webgl, core for tfjs-core). Example: 'webgl,core'.
+ run: same as numRuns
+ task: correctness to "Test correctness" or performance to "Run benchmark"
+ warmup: same as numWarmups
diff --git a/e2e/benchmarks/local-benchmark/index.html b/e2e/benchmarks/local-benchmark/index.html index f47bec3983f..d0de07d8151 100644 --- a/e2e/benchmarks/local-benchmark/index.html +++ b/e2e/benchmarks/local-benchmark/index.html @@ -126,6 +126,7 @@

TensorFlow.js Model Benchmark

let inputSizeController = null; let inputTypeController = null; let modelArchitectureController = null; + let modelAlphaController = null; let tunableFlagsControllers = null; function updateGUIFromURLState() { @@ -198,7 +199,7 @@

TensorFlow.js Model Benchmark

numWarmups: warmupTimes, numRuns: runTimes, numProfiles: profileTimes, - benchmark: 'mobilenet_v2', + benchmark: 'mobilenet_v3', run: (v) => { runBenchmark().catch(e => { showMsg('Error: ' + e.message); @@ -280,11 +281,12 @@

TensorFlow.js Model Benchmark

appendRow(timeTable, '', ''); } }, - backend: 'wasm', + backend: 'webgl', kernelTiming: 'aggregate', inputSize: 0, inputType: '', - architecture: '', + architecture: 'small', + alpha: '075', modelType: '', modelUrl: '', isModelChanged: false, @@ -350,6 +352,9 @@

TensorFlow.js Model Benchmark

if (isParameterDefined('architectures')) { appendRow(parameterTable, 'architecture', state.architecture); } + if (isParameterDefined('alphas')) { + appendRow(parameterTable, 'alpha', state.alpha); + } } async function setupKernelTable() { @@ -514,6 +519,8 @@

TensorFlow.js Model Benchmark

state.inputType = urlState.get('inputType'); if (urlState.has('architecture')) state.architecture = urlState.get('architecture'); + if (urlState.has('alpha')) + state.alpha = urlState.get('alpha'); } async function loadModelAndRecordTime() { @@ -535,7 +542,7 @@

TensorFlow.js Model Benchmark

if (isTflite()) { await loadTfliteModel(); } else { - model = await benchmark.load(inputSize, state.architecture, state.inputType); + model = await benchmark.load(inputSize, state.architecture, state.inputType, state.alpha); } state.inputs = []; const inputs = isTflite() ? tfliteModel.inputs : model.inputs @@ -803,6 +810,10 @@

TensorFlow.js Model Benchmark

modelParameterFolder.remove(modelArchitectureController); modelArchitectureController = null; } + if (modelAlphaController !== null) { + modelParameterFolder.remove(modelAlphaController); + modelAlphaController = null; + } if (inputSizeController !== null) { modelParameterFolder.remove(inputSizeController); inputSizeController = null; @@ -846,10 +857,29 @@

TensorFlow.js Model Benchmark

modelArchitectureController.setValue(defaultModelArchitecture); state.architecture = defaultModelArchitecture; } else { - // Model doesn't support input size. + // Model doesn't support architecture. state.architecture = ''; } + if (isParameterDefined('alphas')) { + modelAlphaController = modelParameterFolder.add(state, 'alpha', benchmark['alphas']).name('alphas').onChange(async alpha => { + state.alpha = alpha; + state.isModelChanged = true; + }); + // Current use first value as default. + let defaultModelAlpha = null; + if (isURLParameterDefined('alpha')) + defaultModelAlpha = urlState.get('alpha'); + else + defaultModelAlpha = benchmark['alphas'][0]; + + modelAlphaController.setValue(defaultModelAlpha); + state.alpha = defaultModelAlpha; + } else { + // Model doesn't support alpha. + state.alpha = ''; + } + if (isParameterDefined('inputTypes')) { inputTypeController = modelParameterFolder.add(state, 'inputType', benchmark['inputTypes']).name('inputTypes').onChange(async inputType => { state.inputType = inputType; @@ -868,7 +898,10 @@

TensorFlow.js Model Benchmark

// Model doesn't support input type. state.inputType = ''; } - if (isParameterDefined('inputSizes') || isParameterDefined('inputTypes') || isParameterDefined('architectures')) { + + // Unfolding the model parameter UI if any model parameters are deinfed + // in the current model. + if (isParameterDefined('inputSizes') || isParameterDefined('inputTypes') || isParameterDefined('architectures') || isParameterDefined('alphas')) { modelParameterFolder.open(); } @@ -881,7 +914,7 @@

TensorFlow.js Model Benchmark

}); modelUrlController.domElement.querySelector('input').placeholder = 'https://your-domain.com/model-path/model.json'; - if (modelUrlController != null && urlState.has('modelUrl')) { + if (modelUrlController != null && urlState && urlState.has('modelUrl')) { modelUrlController.setValue(urlState.get('modelUrl')); } } @@ -899,18 +932,32 @@

TensorFlow.js Model Benchmark

parameterFolder.add(state, 'kernelTiming', ['aggregate', 'individual']); parameterFolder.open(); + // Show model parameter UI when loading the page. modelParameterFolder = gui.addFolder('Model Parameters'); + // For each model parameter, show it only if it is defined in the + // pre-selected model. if (isParameterDefined('architectures')) { - modelArchitectureController = modelParameterFolder.add(state, 'architecture', []); + modelArchitectureController = modelParameterFolder.add(state, 'architecture', benchmarks[state.benchmark]['architectures']); + modelArchitectureController.setValue(state.architecture); + } + if (isParameterDefined('alphas')) { + modelAlphaController = modelParameterFolder.add(state, 'alpha', benchmarks[state.benchmark]['alphas']); + modelAlphaController.setValue(state.alpha); } if (isParameterDefined('inputSizes')) { - inputSizeController = modelParameterFolder.add(state, 'inputSize', []); + inputSizeController = modelParameterFolder.add(state, 'inputSize', benchmarks[state.benchmark]['inputSizes']); + inputSizeController.setValue(state.inputSize); } if (isParameterDefined('inputTypes')) { - inputTypeController = modelParameterFolder.add(state, 'inputType', []); + inputTypeController = modelParameterFolder.add(state, 'inputType', benchmarks[state.benchmark]['inputTypes']); + inputTypeController.setValue(state.inputType); } - modelParameterFolder.open(); + // Unfolding the model parameter UI if any model parameters are deinfed + // in the pre-selected model. + if (isParameterDefined('inputSizes') || isParameterDefined('inputTypes') || isParameterDefined('architectures') || isParameterDefined('alphas')) { + modelParameterFolder.open(); + } const envFolder = gui.addFolder('Environment'); const backendsController = envFolder.add( @@ -973,7 +1020,7 @@

TensorFlow.js Model Benchmark

tfliteModel.modelRunner.cleanUp(); } const benchmark = benchmarks[state.benchmark]; - tfliteModel = await benchmark.loadTflite(enableProfiling); + tfliteModel = await benchmark.loadTflite(enableProfiling, state.architecture, state.alpha); } function updateModelsDropdown(newValues) { diff --git a/e2e/benchmarks/model_config.js b/e2e/benchmarks/model_config.js index 27cb52b99d8..4201d6e8962 100644 --- a/e2e/benchmarks/model_config.js +++ b/e2e/benchmarks/model_config.js @@ -91,14 +91,20 @@ function predictFunction(input) { const benchmarks = { 'mobilenet_v3': { type: 'GraphModel', - load: async () => { - const url = - 'https://tfhub.dev/google/tfjs-model/imagenet/mobilenet_v3_small_100_224/classification/5/default/1'; + alphas: ['075', '100'], + architectures: ['small', 'large'], + load: async ( + inputResolution = 224, modelArchitecture = 'small', + inputType = 'tensor', alpha = '075') => { + const url = `https://tfhub.dev/google/tfjs-model/imagenet/mobilenet_v3_${ + modelArchitecture}_${alpha}_224/classification/5/default/1`; return tf.loadGraphModel(url, {fromTFHub: true}); }, - loadTflite: async (enableProfiling = false) => { - const url = - 'https://tfhub.dev/google/lite-model/imagenet/mobilenet_v3_small_100_224/classification/5/metadata/1'; + loadTflite: async ( + enableProfiling = false, modelArchitecture = 'small', + alpha = '075') => { + const url = `https://tfhub.dev/google/lite-model/imagenet/mobilenet_v3_${ + modelArchitecture}_${alpha}_224/classification/5/metadata/1`; return tflite.loadTFLiteModel(url, {enableProfiling}); }, predictFunc: () => { @@ -111,11 +117,29 @@ const benchmarks = { }, }, 'mobilenet_v2': { + type: 'GraphModel', + alphas: ['050', '075', '100'], + load: async ( + inputResolution = 224, modelArchitecture = '', inputType = 'tensor', + alpha = '050') => { + const url = `https://tfhub.dev/google/tfjs-model/imagenet/mobilenet_v2_${ + alpha}_224/classification/3/default/1`; + return tf.loadGraphModel(url, {fromTFHub: true}); + }, + 
predictFunc: () => { + const input = tf.randomNormal([1, 224, 224, 3]); + return predictFunction(input); + }, + }, + // Currently, for mibilnet_v2, only alpha=100 has tflite model. Since users + // could tune the alpha for 'mobilenet_v2' tfjs models, while we could only + // provides mibilnet_v2_lite with alpha=100 on the tflite backend, so + // mibilnet_v2_lite is separated from mibilnet_v2 and fixes alpha=100; othwise + // it would confuse users. + 'mobilenet_v2_lite': { type: 'GraphModel', load: async () => { - const url = - 'https://storage.googleapis.com/learnjs-data/mobilenet_v2_100_fused/model.json'; - return tf.loadGraphModel(url); + throw new Error(`Please set tflite as the backend to run this model.`); }, loadTflite: async (enableProfiling = false) => { const url = From 4340da3bd7ccdfe396466b326127efefac8a48e5 Mon Sep 17 00:00:00 2001 From: Linchenn Date: Sat, 6 Aug 2022 17:04:21 -0700 Subject: [PATCH 2/8] fix --- e2e/benchmarks/local-benchmark/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/e2e/benchmarks/local-benchmark/README.md b/e2e/benchmarks/local-benchmark/README.md index 7898dfb4273..45e13f45611 100644 --- a/e2e/benchmarks/local-benchmark/README.md +++ b/e2e/benchmarks/local-benchmark/README.md @@ -75,7 +75,7 @@ It's easy to set up a web server to host benchmarks and run against them via e2e inputSize: same as inputSizes
inputType: same as inputTypes
modelUrl: same as modelUrl, for custom models only
- ${InputeName}Shape: the input shape array, separated by comma, for custom models only. For example, bodypix's [graph model](https://storage. googleapis.com/tfjs-models/savedmodel/bodypix/mobilenet/float/075/ model-stride16.json) has an input named sub_2, then users could add '`sub_2Shape=1,1,1,3`' in the URL to populate its shape.
+ ${InputeName}Shape: the input shape array, separated by comma, for custom models only. For example, bodypix's [graph model](https://storage.googleapis.com/tfjs-models/savedmodel/bodypix/mobilenet/float/075/model-stride16.json) has an input named sub_2, then users could add '`sub_2Shape=1,1,1,3`' in the URL to populate its shape.
* Environment related parameters: From a0b44e40b8571313124242d490cda31b4ea393e2 Mon Sep 17 00:00:00 2001 From: Linchenn Date: Sat, 6 Aug 2022 17:18:50 -0700 Subject: [PATCH 3/8] rename --- e2e/benchmarks/model_config.js | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/e2e/benchmarks/model_config.js b/e2e/benchmarks/model_config.js index 4201d6e8962..40172e9556c 100644 --- a/e2e/benchmarks/model_config.js +++ b/e2e/benchmarks/model_config.js @@ -89,7 +89,7 @@ function predictFunction(input) { } const benchmarks = { - 'mobilenet_v3': { + 'MobileNetV3': { type: 'GraphModel', alphas: ['075', '100'], architectures: ['small', 'large'], @@ -116,7 +116,7 @@ const benchmarks = { } }, }, - 'mobilenet_v2': { + 'MobileNetV2': { type: 'GraphModel', alphas: ['050', '075', '100'], load: async ( @@ -136,7 +136,7 @@ const benchmarks = { // provides mibilnet_v2_lite with alpha=100 on the tflite backend, so // mibilnet_v2_lite is separated from mibilnet_v2 and fixes alpha=100; othwise // it would confuse users. - 'mobilenet_v2_lite': { + 'MobileNetV2Lite': { type: 'GraphModel', load: async () => { throw new Error(`Please set tflite as the backend to run this model.`); From 602d204b0b3ef57da8cc8056f9c43f9fd358c3f2 Mon Sep 17 00:00:00 2001 From: Linchenn Date: Sun, 7 Aug 2022 11:12:11 -0700 Subject: [PATCH 4/8] Update index.html --- e2e/benchmarks/local-benchmark/index.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/e2e/benchmarks/local-benchmark/index.html b/e2e/benchmarks/local-benchmark/index.html index d0de07d8151..5e1fde65e7d 100644 --- a/e2e/benchmarks/local-benchmark/index.html +++ b/e2e/benchmarks/local-benchmark/index.html @@ -199,7 +199,7 @@

TensorFlow.js Model Benchmark

numWarmups: warmupTimes, numRuns: runTimes, numProfiles: profileTimes, - benchmark: 'mobilenet_v3', + benchmark: 'MobileNetV3', run: (v) => { runBenchmark().catch(e => { showMsg('Error: ' + e.message); From 786f7e4454d5f5831619605aef80bba3da3a1790 Mon Sep 17 00:00:00 2001 From: Linchenn Date: Mon, 8 Aug 2022 13:47:25 -0700 Subject: [PATCH 5/8] Update README.md --- e2e/benchmarks/local-benchmark/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/e2e/benchmarks/local-benchmark/README.md b/e2e/benchmarks/local-benchmark/README.md index 45e13f45611..895f441d4e1 100644 --- a/e2e/benchmarks/local-benchmark/README.md +++ b/e2e/benchmarks/local-benchmark/README.md @@ -69,8 +69,8 @@ It's easy to set up a web server to host benchmarks and run against them via e2e * Model related parameters: - architecture: same as architecture
- alpha: same as alpha
+ architecture: same as architecture (only certain models has it, such as MobileNetV3 and posenet)
+ alpha: same as alpha (only certain models has it, such as MobileNetV3)
benchmark: same as models
inputSize: same as inputSizes
inputType: same as inputTypes
From 535b6424b82f2b0c70db88b37eb03672c77a971e Mon Sep 17 00:00:00 2001 From: Linchenn Date: Mon, 8 Aug 2022 14:14:02 -0700 Subject: [PATCH 6/8] rollback Alpha in html --- e2e/benchmarks/local-benchmark/index.html | 42 +++-------------------- 1 file changed, 4 insertions(+), 38 deletions(-) diff --git a/e2e/benchmarks/local-benchmark/index.html b/e2e/benchmarks/local-benchmark/index.html index 5e1fde65e7d..b5f6ed1bc84 100644 --- a/e2e/benchmarks/local-benchmark/index.html +++ b/e2e/benchmarks/local-benchmark/index.html @@ -126,7 +126,6 @@

TensorFlow.js Model Benchmark

let inputSizeController = null; let inputTypeController = null; let modelArchitectureController = null; - let modelAlphaController = null; let tunableFlagsControllers = null; function updateGUIFromURLState() { @@ -286,7 +285,6 @@

TensorFlow.js Model Benchmark

inputSize: 0, inputType: '', architecture: 'small', - alpha: '075', modelType: '', modelUrl: '', isModelChanged: false, @@ -352,9 +350,6 @@

TensorFlow.js Model Benchmark

if (isParameterDefined('architectures')) { appendRow(parameterTable, 'architecture', state.architecture); } - if (isParameterDefined('alphas')) { - appendRow(parameterTable, 'alpha', state.alpha); - } } async function setupKernelTable() { @@ -519,8 +514,6 @@

TensorFlow.js Model Benchmark

state.inputType = urlState.get('inputType'); if (urlState.has('architecture')) state.architecture = urlState.get('architecture'); - if (urlState.has('alpha')) - state.alpha = urlState.get('alpha'); } async function loadModelAndRecordTime() { @@ -542,7 +535,7 @@

TensorFlow.js Model Benchmark

if (isTflite()) { await loadTfliteModel(); } else { - model = await benchmark.load(inputSize, state.architecture, state.inputType, state.alpha); + model = await benchmark.load(inputSize, state.architecture, state.inputType); } state.inputs = []; const inputs = isTflite() ? tfliteModel.inputs : model.inputs @@ -810,10 +803,6 @@

TensorFlow.js Model Benchmark

modelParameterFolder.remove(modelArchitectureController); modelArchitectureController = null; } - if (modelAlphaController !== null) { - modelParameterFolder.remove(modelAlphaController); - modelAlphaController = null; - } if (inputSizeController !== null) { modelParameterFolder.remove(inputSizeController); inputSizeController = null; @@ -861,25 +850,6 @@

TensorFlow.js Model Benchmark

state.architecture = ''; } - if (isParameterDefined('alphas')) { - modelAlphaController = modelParameterFolder.add(state, 'alpha', benchmark['alphas']).name('alphas').onChange(async alpha => { - state.alpha = alpha; - state.isModelChanged = true; - }); - // Current use first value as default. - let defaultModelAlpha = null; - if (isURLParameterDefined('alpha')) - defaultModelAlpha = urlState.get('alpha'); - else - defaultModelAlpha = benchmark['alphas'][0]; - - modelAlphaController.setValue(defaultModelAlpha); - state.alpha = defaultModelAlpha; - } else { - // Model doesn't support alpha. - state.alpha = ''; - } - if (isParameterDefined('inputTypes')) { inputTypeController = modelParameterFolder.add(state, 'inputType', benchmark['inputTypes']).name('inputTypes').onChange(async inputType => { state.inputType = inputType; @@ -901,7 +871,7 @@

TensorFlow.js Model Benchmark

// Unfolding the model parameter UI if any model parameters are deinfed // in the current model. - if (isParameterDefined('inputSizes') || isParameterDefined('inputTypes') || isParameterDefined('architectures') || isParameterDefined('alphas')) { + if (isParameterDefined('inputSizes') || isParameterDefined('inputTypes') || isParameterDefined('architectures')) { modelParameterFolder.open(); } @@ -940,10 +910,6 @@

TensorFlow.js Model Benchmark

modelArchitectureController = modelParameterFolder.add(state, 'architecture', benchmarks[state.benchmark]['architectures']); modelArchitectureController.setValue(state.architecture); } - if (isParameterDefined('alphas')) { - modelAlphaController = modelParameterFolder.add(state, 'alpha', benchmarks[state.benchmark]['alphas']); - modelAlphaController.setValue(state.alpha); - } if (isParameterDefined('inputSizes')) { inputSizeController = modelParameterFolder.add(state, 'inputSize', benchmarks[state.benchmark]['inputSizes']); inputSizeController.setValue(state.inputSize); @@ -955,7 +921,7 @@

TensorFlow.js Model Benchmark

// Unfolding the model parameter UI if any model parameters are deinfed // in the pre-selected model. - if (isParameterDefined('inputSizes') || isParameterDefined('inputTypes') || isParameterDefined('architectures') || isParameterDefined('alphas')) { + if (isParameterDefined('inputSizes') || isParameterDefined('inputTypes') || isParameterDefined('architectures')) { modelParameterFolder.open(); } @@ -1020,7 +986,7 @@

TensorFlow.js Model Benchmark

tfliteModel.modelRunner.cleanUp(); } const benchmark = benchmarks[state.benchmark]; - tfliteModel = await benchmark.loadTflite(enableProfiling, state.architecture, state.alpha); + tfliteModel = await benchmark.loadTflite(enableProfiling, state.architecture); } function updateModelsDropdown(newValues) { From c940a4341bcfe257d68dc23036e95b30801a0249 Mon Sep 17 00:00:00 2001 From: Linchenn Date: Mon, 8 Aug 2022 14:18:42 -0700 Subject: [PATCH 7/8] rollback alpha for model_config --- e2e/benchmarks/local-benchmark/README.md | 1 - e2e/benchmarks/local-benchmark/index.html | 2 +- e2e/benchmarks/model_config.js | 21 ++++++++------------- 3 files changed, 9 insertions(+), 15 deletions(-) diff --git a/e2e/benchmarks/local-benchmark/README.md b/e2e/benchmarks/local-benchmark/README.md index 895f441d4e1..d594593f83c 100644 --- a/e2e/benchmarks/local-benchmark/README.md +++ b/e2e/benchmarks/local-benchmark/README.md @@ -70,7 +70,6 @@ It's easy to set up a web server to host benchmarks and run against them via e2e * Model related parameters: architecture: same as architecture (only certain models has it, such as MobileNetV3 and posenet)
- alpha: same as alpha (only certain models has it, such as MobileNetV3)
benchmark: same as models
inputSize: same as inputSizes
inputType: same as inputTypes
diff --git a/e2e/benchmarks/local-benchmark/index.html b/e2e/benchmarks/local-benchmark/index.html index b5f6ed1bc84..f5c2d13bdc4 100644 --- a/e2e/benchmarks/local-benchmark/index.html +++ b/e2e/benchmarks/local-benchmark/index.html @@ -284,7 +284,7 @@

TensorFlow.js Model Benchmark

kernelTiming: 'aggregate', inputSize: 0, inputType: '', - architecture: 'small', + architecture: 'small_075', modelType: '', modelUrl: '', isModelChanged: false, diff --git a/e2e/benchmarks/model_config.js b/e2e/benchmarks/model_config.js index 40172e9556c..fd46c051625 100644 --- a/e2e/benchmarks/model_config.js +++ b/e2e/benchmarks/model_config.js @@ -92,19 +92,16 @@ const benchmarks = { 'MobileNetV3': { type: 'GraphModel', alphas: ['075', '100'], - architectures: ['small', 'large'], - load: async ( - inputResolution = 224, modelArchitecture = 'small', - inputType = 'tensor', alpha = '075') => { + architectures: ['small_075', 'small_100', 'large_075', 'large_100'], + load: async (inputResolution = 224, modelArchitecture = 'small_075') => { const url = `https://tfhub.dev/google/tfjs-model/imagenet/mobilenet_v3_${ - modelArchitecture}_${alpha}_224/classification/5/default/1`; + modelArchitecture}_224/classification/5/default/1`; return tf.loadGraphModel(url, {fromTFHub: true}); }, loadTflite: async ( - enableProfiling = false, modelArchitecture = 'small', - alpha = '075') => { + enableProfiling = false, modelArchitecture = 'small_075') => { const url = `https://tfhub.dev/google/lite-model/imagenet/mobilenet_v3_${ - modelArchitecture}_${alpha}_224/classification/5/metadata/1`; + modelArchitecture}_224/classification/5/metadata/1`; return tflite.loadTFLiteModel(url, {enableProfiling}); }, predictFunc: () => { @@ -118,12 +115,10 @@ const benchmarks = { }, 'MobileNetV2': { type: 'GraphModel', - alphas: ['050', '075', '100'], - load: async ( - inputResolution = 224, modelArchitecture = '', inputType = 'tensor', - alpha = '050') => { + architectures: ['050', '075', '100'], + load: async (inputResolution = 224, modelArchitecture = '050') => { const url = `https://tfhub.dev/google/tfjs-model/imagenet/mobilenet_v2_${ - alpha}_224/classification/3/default/1`; + modelArchitecture}_224/classification/3/default/1`; return tf.loadGraphModel(url, {fromTFHub: true}); }, 
predictFunc: () => { From 7b3e29bb328b1563d5990698c11205aa1746784d Mon Sep 17 00:00:00 2001 From: Linchenn Date: Mon, 8 Aug 2022 14:46:52 -0700 Subject: [PATCH 8/8] Update model_config.js --- e2e/benchmarks/model_config.js | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/e2e/benchmarks/model_config.js b/e2e/benchmarks/model_config.js index fd46c051625..467e4875b60 100644 --- a/e2e/benchmarks/model_config.js +++ b/e2e/benchmarks/model_config.js @@ -91,7 +91,6 @@ function predictFunction(input) { const benchmarks = { 'MobileNetV3': { type: 'GraphModel', - alphas: ['075', '100'], architectures: ['small_075', 'small_100', 'large_075', 'large_100'], load: async (inputResolution = 224, modelArchitecture = 'small_075') => { const url = `https://tfhub.dev/google/tfjs-model/imagenet/mobilenet_v3_${ @@ -126,11 +125,11 @@ const benchmarks = { return predictFunction(input); }, }, - // Currently, for mibilnet_v2, only alpha=100 has tflite model. Since users - // could tune the alpha for 'mobilenet_v2' tfjs models, while we could only - // provides mibilnet_v2_lite with alpha=100 on the tflite backend, so - // mibilnet_v2_lite is separated from mibilnet_v2 and fixes alpha=100; othwise - // it would confuse users. + // Currently, for mobilenet_v2, only the architectures with alpha=100 have a + // tflite model. Since users could tune the alpha for 'mobilenet_v2' tfjs + // models, while we could only provide mobilenet_v2_lite with alpha=100 on the + // tflite backend, so mobilenet_v2_lite is separated from mobilenet_v2 and fixes + // alpha=100; otherwise it would confuse users. 'MobileNetV2Lite': { type: 'GraphModel', load: async () => {