Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add model debug mode support #5659

Merged
merged 5 commits into from
Nov 3, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions e2e/benchmarks/benchmark_util.js
Original file line number Diff line number Diff line change
Expand Up @@ -437,6 +437,7 @@ const TUNABLE_FLAG_VALUE_RANGE_MAP = {
WEBGL_FLUSH_THRESHOLD: [-1, 0, 0.25, 0.5, 0.75, 1, 1.25, 1.5, 1.75, 2],
WEBGL_PACK_DEPTHWISECONV: [true, false],
CHECK_COMPUTATION_FOR_ERRORS: [true, false],
KEEP_INTERMEDIATE_TENSORS: [true, false],
WEBGL_USE_SHAPES_UNIFORMS: [true, false],
WEBGPU_DEFERRED_SUBMIT_BATCH_SIZE: [1, 5, 10, 15, 20, 25, 30, 35, 40]
};
Expand Down
47 changes: 43 additions & 4 deletions e2e/benchmarks/local-benchmark/index.html
Original file line number Diff line number Diff line change
Expand Up @@ -140,6 +140,15 @@ <h2>TensorFlow.js Model Benchmark</h2>
const value = Number(urlState.get('WEBGPU_DEFERRED_SUBMIT_BATCH_SIZE'));
tunableFlagsControllers['WEBGPU_DEFERRED_SUBMIT_BATCH_SIZE'].setValue(value);
}
if (tunableFlagsControllers['KEEP_INTERMEDIATE_TENSORS'] != null &&
urlState.has('KEEP_INTERMEDIATE_TENSORS')) {
const value = urlState.get('KEEP_INTERMEDIATE_TENSORS');
if (value === 'true') {
tunableFlagsControllers['KEEP_INTERMEDIATE_TENSORS'].setValue(true);
} else {
tunableFlagsControllers['KEEP_INTERMEDIATE_TENSORS'].setValue(false);
}
}
}
// The model parameter will be updated automatically by onChange.
if (modelController != null && urlState.has('benchmark'))
Expand All @@ -157,6 +166,29 @@ <h2>TensorFlow.js Model Benchmark</h2>
}
}

// Dumps every intermediate tensor in `tensorsMap` to the console.
// For each entry, the key (node name) is logged first, followed by the
// downloaded data of each tensor recorded under that key. A null or
// undefined map is a no-op.
async function printTensors(tensorsMap) {
  if (!tensorsMap) {
    return;
  }
  for (const [nodeName, tensors] of Object.entries(tensorsMap)) {
    console.warn(nodeName);
    for (const tensor of tensors) {
      console.warn(await tensor.data());
    }
  }
}

// Runs one inference with `predict` and downloads the prediction result.
// When `debug` is true, additionally dumps the model's intermediate
// tensors to the console and disposes them afterwards (requires the
// KEEP_INTERMEDIATE_TENSORS flag to have been enabled before inference).
async function predictAndGetPredictionData(predict, model, inferenceInput, debug) {
  const prediction = await predict(model, inferenceInput);
  if (debug) {
    const intermediateTensors = model.getIntermediateTensors();
    await printTensors(intermediateTensors);
    model.disposeIntermediateTensors();
  }
  return getPredictionData(prediction);
}

const state = {
numWarmups: warmupTimes,
numRuns: runTimes,
Expand Down Expand Up @@ -196,13 +228,20 @@ <h2>TensorFlow.js Model Benchmark</h2>
inferenceInput = generateInputFromDef(
state.inputs, model instanceof tf.GraphModel);
}
const referencePrediction = predict(model, inferenceInput);
referenceData = await getPredictionData(referencePrediction);

// Whether intermediate tensors should be retained for debugging.
// Reading an unregistered flag throws, so default to false in that case.
let keepIntermediateTensors = false;
try {
  keepIntermediateTensors = tf.env().getBool('KEEP_INTERMEDIATE_TENSORS');
} catch (e) {
  console.warn(e.message);
}

// Use logical && (not bitwise &) so `debug` is a true boolean rather
// than a 0/1 number. A benchmark opts out of debug mode by declaring
// `supportDebug: false`; absence of the field means debug is supported.
const debug = keepIntermediateTensors &&
    (benchmarks[state.benchmark].supportDebug !== false);
referenceData = await predictAndGetPredictionData(predict, model, inferenceInput, debug);

await tf.setBackend(state.backend);
await showMsg(`Runing on ${state.backend}`);
const prediction = predict(model, inferenceInput);
predictionData = await getPredictionData(prediction);
predictionData = await predictAndGetPredictionData(predict, model, inferenceInput, debug);
} catch (e) {
showMsg('Error: ' + e.message);
throw e;
Expand Down
11 changes: 7 additions & 4 deletions e2e/benchmarks/local-benchmark/index.js
Original file line number Diff line number Diff line change
Expand Up @@ -22,16 +22,19 @@ const BACKEND_FLAGS_MAP = {
'WASM_HAS_SIMD_SUPPORT',
'WASM_HAS_MULTITHREAD_SUPPORT',
'CHECK_COMPUTATION_FOR_ERRORS',
'KEEP_INTERMEDIATE_TENSORS',
],
webgl: [
'WEBGL_VERSION', 'WEBGL_CPU_FORWARD', 'WEBGL_PACK',
'WEBGL_FORCE_F16_TEXTURES', 'WEBGL_RENDER_FLOAT32_CAPABLE',
'WEBGL_FLUSH_THRESHOLD', 'WEBGL_PACK_DEPTHWISECONV',
'CHECK_COMPUTATION_FOR_ERRORS', 'WEBGL_USE_SHAPES_UNIFORMS'
'CHECK_COMPUTATION_FOR_ERRORS', 'WEBGL_USE_SHAPES_UNIFORMS',
'KEEP_INTERMEDIATE_TENSORS'
],
};
if (tf.engine().backendNames().includes('webgpu')) {
BACKEND_FLAGS_MAP['webgpu'] = ['WEBGPU_DEFERRED_SUBMIT_BATCH_SIZE'];
BACKEND_FLAGS_MAP['webgpu'] =
['WEBGPU_DEFERRED_SUBMIT_BATCH_SIZE', 'KEEP_INTERMEDIATE_TENSORS'];
}

const TUNABLE_FLAG_NAME_MAP = {
Expand All @@ -47,6 +50,7 @@ const TUNABLE_FLAG_NAME_MAP = {
WEBGL_PACK_DEPTHWISECONV: 'Packed depthwise Conv2d',
WEBGL_USE_SHAPES_UNIFORMS: 'Use shapes uniforms',
CHECK_COMPUTATION_FOR_ERRORS: 'Check each op result',
KEEP_INTERMEDIATE_TENSORS: 'Print intermediate tensors',
};
if (tf.engine().backendNames().includes('webgpu')) {
TUNABLE_FLAG_NAME_MAP['WEBGPU_DEFERRED_SUBMIT_BATCH_SIZE'] =
Expand Down Expand Up @@ -129,7 +133,6 @@ function showBackendFlagSettingsAndReturnTunableFlagControllers(
`because its value range is [${flagValueRange}].`);
continue;
}

let flagController;
if (typeof flagValueRange[0] === 'boolean') {
// Show checkbox for boolean flags.
Expand Down Expand Up @@ -209,7 +212,7 @@ async function initDefaultValueMap() {
function getTunableRange(flag) {
const defaultValue = TUNABLE_FLAG_DEFAULT_VALUE_MAP[flag];
if (flag === 'WEBGL_FORCE_F16_TEXTURES' ||
flag === 'WEBGL_PACK_DEPTHWISECONV') {
flag === 'WEBGL_PACK_DEPTHWISECONV' || 'KEEP_INTERMEDIATE_TENSORS') {
return [false, true];
} else if (flag === 'WEBGL_VERSION') {
const tunableRange = [];
Expand Down
51 changes: 31 additions & 20 deletions e2e/benchmarks/model_config.js
Original file line number Diff line number Diff line change
Expand Up @@ -74,6 +74,20 @@ const sentences = [
'what is the forecast for here at tea time',
];

// Builds the inference closure used by a benchmark's predictFunc.
//
// The returned closure calls `executeAsync` when the
// KEEP_INTERMEDIATE_TENSORS flag is enabled (so intermediate tensors are
// retained for debug dumping) and plain `predict` otherwise. Reading an
// unregistered flag throws; in that case we fall back to `predict`.
//
// NOTE(review): the `model` parameter is ignored — the actual model is
// supplied later, when the returned closure is invoked. The original
// closure parameter shadowed `model`, which was misleading; it is named
// `benchmarkModel` here to make the deferred binding explicit.
function predictFunction(model, input) {
  let debug = false;
  try {
    debug = tf.env().getBool('KEEP_INTERMEDIATE_TENSORS');
  } catch (e) {
    console.warn(e.message);
  }
  if (debug) {
    return benchmarkModel => benchmarkModel.executeAsync(input);
  } else {
    return benchmarkModel => benchmarkModel.predict(input);
  }
}

const benchmarks = {
'mobilenet_v2': {
type: 'GraphModel',
Expand All @@ -84,7 +98,7 @@ const benchmarks = {
},
predictFunc: () => {
const input = tf.randomNormal([1, 224, 224, 3]);
return model => model.predict(input);
return predictFunction(model, input);
}
},
'mesh_128': {
Expand All @@ -95,10 +109,8 @@ const benchmarks = {
return tf.loadGraphModel(url);
},
predictFunc: () => {
const zeros = tf.zeros([1, 128, 128, 3]);
return model => {
return model.predict(zeros)[0];
};
const input = tf.zeros([1, 128, 128, 3]);
return predictFunction(model, input);
},
},
'face_detector': {
Expand All @@ -109,10 +121,8 @@ const benchmarks = {
return tf.loadGraphModel(url);
},
predictFunc: () => {
const zeros = tf.zeros([1, 128, 128, 3]);
return model => {
return model.predict(zeros);
};
const input = tf.zeros([1, 128, 128, 3]);
return predictFunction(model, input);
},
},
'hand_detector': {
Expand All @@ -122,10 +132,8 @@ const benchmarks = {
return tf.loadGraphModel(url, {fromTFHub: true});
},
predictFunc: () => {
const zeros = tf.zeros([1, 256, 256, 3]);
return model => {
return model.predict(zeros);
};
const input = tf.zeros([1, 256, 256, 3]);
return predictFunction(model, input);
},
},
'hand_skeleton': {
Expand All @@ -135,14 +143,13 @@ const benchmarks = {
return tf.loadGraphModel(url, {fromTFHub: true});
},
predictFunc: () => {
const zeros = tf.zeros([1, 256, 256, 3]);
return model => {
return model.predict(zeros);
};
const input = tf.zeros([1, 256, 256, 3]);
return predictFunction(model, input);
},
},
'AutoML Image': {
type: 'GraphModel',
supportDebug: false,
load: async () => {
const url =
'https://storage.googleapis.com/tfjs-testing/tfjs-automl/img_classification/model.json';
Expand All @@ -155,6 +162,7 @@ const benchmarks = {
},
'AutoML Object': {
type: 'GraphModel',
supportDebug: false,
load: async () => {
const url =
'https://storage.googleapis.com/tfjs-testing/tfjs-automl/object_detection/model.json';
Expand All @@ -167,6 +175,7 @@ const benchmarks = {
},
'USE - batchsize 30': {
type: 'GraphModel',
supportDebug: false,
load: async () => {
return use.load();
},
Expand All @@ -180,6 +189,7 @@ const benchmarks = {
},
'USE - batchsize 1': {
type: 'GraphModel',
supportDebug: false,
load: async () => {
return use.load();
},
Expand All @@ -196,6 +206,7 @@ const benchmarks = {
inputSizes: [128, 256, 512, 1024],
architectures: ['MobileNetV1', 'ResNet50'],
inputTypes: ['image', 'tensor'],
supportDebug: false,
load: async (
inputResolution = 128, modelArchitecture = 'MobileNetV1',
inputType = 'image') => {
Expand Down Expand Up @@ -231,6 +242,7 @@ const benchmarks = {
},
'bodypix': {
type: 'GraphModel',
supportDebug: false,
// The ratio to the default camera size [480, 640].
inputSizes: [0.25, 0.5, 0.75, 1.0],
architectures: ['MobileNetV1', 'ResNet50'],
Expand Down Expand Up @@ -281,9 +293,7 @@ const benchmarks = {
},
predictFunc: (inputResolution = 128) => {
const input = tf.randomNormal([1, inputResolution, inputResolution, 3]);
return model => {
return model.predict(input);
};
return predictFunction(model, input);
},
},
'speech-commands': {
Expand Down Expand Up @@ -321,6 +331,7 @@ const benchmarks = {
],
inputTypes: ['image', 'tensor', 'imageBitmap'],
modelTypes: ['lite', 'full', 'heavy', 'lightning'],
supportDebug: false,
load: async (
inputResolution = 128, modelArchitecture = 'BlazePose-lite',
inputType = 'image') => {
Expand Down
Loading