From 1828cf6567551581f78cbca605f5e767c7cb1b2d Mon Sep 17 00:00:00 2001
From: Guoyu Wang <62914304+gwang-msft@users.noreply.github.com>
Date: Fri, 4 Jun 2021 17:57:29 -0700
Subject: [PATCH] Update ORT Mobile example [Android Image Classifier] (#6)
* Update image classifier to use prebuilt ort package
* Update label source
* update readme
* Remove deprecated jcenter repos
* Minor update
* Address CR comments
* Move create ort session to background, update components to latest version
---
.gitignore | 2 +
README.md | 6 +--
mobile/README.md | 9 ++++
.../image_classifications/android/README.md | 30 ++++++-------
.../android/app/build.gradle | 9 ++--
.../example/imageclassifier/MainActivity.kt | 44 ++++++++++++-------
.../example/imageclassifier/ORTAnalyzer.kt | 1 +
.../android/app/src/main/res/raw/.gitignore | 2 +-
.../android/build.gradle | 5 +--
9 files changed, 64 insertions(+), 44 deletions(-)
create mode 100644 mobile/README.md
diff --git a/.gitignore b/.gitignore
index dfcfd56f444f9..9b70681f638bb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -348,3 +348,5 @@ MigrationBackup/
# Ionide (cross platform F# VS Code tools) working folder
.ionide/
+
+.DS_Store
diff --git a/README.md b/README.md
index 032567e543297..086af7b77a46b 100644
--- a/README.md
+++ b/README.md
@@ -4,7 +4,7 @@
-This repo has examples for using [ONNX Runtime](https://github.com/microsoft/onnxruntime) (ORT) for inference.
+This repo has examples that demonstrate the use of [ONNX Runtime](https://github.com/microsoft/onnxruntime) (ORT) for inference.
## Examples
@@ -12,8 +12,8 @@ Outline the examples in the repository.
| Example | Description |
|-------------------|--------------------------------------------|
-|[Android Image Classifier](mobile/examples/image_classifications/android)| An example application for ONNX Runtime on Android. The example app uses image classification which is able to continuously classify the objects it sees from the device's camera in real-time and displays the most probable inference result on the screen. |
-|[JavaScript API examples](js)| Examples that demonstrates how to use JavaScript API for ONNX Runtime. |
+|[Mobile examples](mobile)| Examples that demonstrate how to use ONNX Runtime Mobile in mobile applications. |
+|[JavaScript API examples](js)| Examples that demonstrate how to use JavaScript API for ONNX Runtime. |
## Contributing
diff --git a/mobile/README.md b/mobile/README.md
new file mode 100644
index 0000000000000..85e1b4b87e17b
--- /dev/null
+++ b/mobile/README.md
@@ -0,0 +1,9 @@
+# ONNX Runtime Mobile examples
+
+The following examples demonstrate how to use ONNX Runtime Mobile in mobile applications.
+
+## Image classification
+
+The example app performs image classification: it continuously classifies the objects seen by the device's camera in real time and displays the most probable inference results on the screen.
+
+- [Android Image Classifier](examples/image_classifications/android)
diff --git a/mobile/examples/image_classifications/android/README.md b/mobile/examples/image_classifications/android/README.md
index 64e312dbf1ebb..45297cf90c211 100644
--- a/mobile/examples/image_classifications/android/README.md
+++ b/mobile/examples/image_classifications/android/README.md
@@ -1,38 +1,34 @@
# ONNX Runtime Mobile image classification Android sample application
## Overview
-This is an example application for [ONNX Runtime](https://github.com/microsoft/onnxruntime) on Android. The demo app uses image classification which is able to continuously classify the objects it sees from the device's camera in real-time and displays the most probable inference result on the screen.
+This is an example application for [ONNX Runtime](https://github.com/microsoft/onnxruntime) on Android. The demo app performs image classification: it continuously classifies the objects seen by the device's camera in real time and displays the most probable inference results on the screen.
This example is loosely based on [Google CodeLabs - Getting Started with CameraX](https://codelabs.developers.google.com/codelabs/camerax-getting-started)
### Model
-We use classic MobileNetV2(float) model and MobileNetV2 (uint8) in this sample app.
+We use the pre-trained [TorchVision MobileNet V2](https://pytorch.org/hub/pytorch_vision_mobilenet_v2/) model in this sample app.
## Requirements
- Android Studio 4.1+ (installed on Mac/Windows/Linux)
- Android SDK 29+
- Android NDK r21+
-- Android device in developer mode and enable USB debugging
+- Android device with a camera in [developer mode](https://developer.android.com/studio/debug/dev-options) with USB debugging enabled
## Build And Run
-### Prerequisites
-- MobileNetV2 ort format model
-- labels text file (used for image classification)
-- Prebuilt ONNX Runtime arm64 Android Archive(AAR) files, which can be directly imported in Android Studio
-The above three files are provided and can be downloaded [here](https://1drv.ms/u/s!Auaxv_56eyubgQX-S_kTP0AP66Km?e=e8YMX1).
-
-[Optional] You can also build your own ONNX Runtime arm64 AAR files for Android. (See [build instructions here](https://www.onnxruntime.ai/docs/how-to/build.html#android) and [Build Android Archive(AAR)](https://www.onnxruntime.ai/docs/how-to/build.html#build-android-archive-aar)).
+### Step 0. [Optional] Prepare the ORT models
+Open the [Mobilenet v2 Quantization with ONNX Runtime Notebook](https://github.com/microsoft/onnxruntime/blob/master/onnxruntime/python/tools/quantization/notebooks/imagenet_v2/mobilenet.ipynb). This notebook demonstrates how to:
+1. Export the pre-trained MobileNet V2 FP32 model from PyTorch to an FP32 ONNX model
+2. Quantize the FP32 ONNX model to a uint8 ONNX model
+3. Convert both FP32 and uint8 ONNX models to ORT models
+Note: this step is optional; you can download the FP32 and uint8 ORT models [here](https://1drv.ms/u/s!Auaxv_56eyubgQlfGIBWh-j1wYyl?e=GYKoL7).
### Step 1. Clone the ONNX Runtime Mobile examples source code and download required model files
Clone this ORT Mobile examples GitHub repository to your computer to get the sample application.
-Download the packages provided in `Prerequisites`.
-
-- Copy MobileNetV1 onnx model and the labels file to `example/image_classification/android/app/src/main/res/raw/`
-
-- Create `/libs` directory under `app/` and copy the AAR file `onnxruntime-release.aar` to `app/libs`
+- Download the labels file [here](https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt)
+- Copy the MobileNetV2 ORT models and the labels file to `example/image_classification/android/app/src/main/res/raw/`
Then open the sample application in Android Studio. To do this, open Android Studio and select `Open an existing project`, browse folders and open the folder `examples/image_classification/android/`.
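The file names above matter: the app loads the models and labels as raw resources via their generated `R.raw` identifiers. A minimal sketch of how these files are consumed at runtime, mirroring the `MainActivity.kt` changes later in this patch:

```kotlin
// Inside an Activity: read the labels and one of the ORT models from res/raw/.
// R.raw.imagenet_classes and R.raw.mobilenet_v2_float are generated from the
// file names copied in the step above.
val labels: List<String> =
    resources.openRawResource(R.raw.imagenet_classes).bufferedReader().readLines()
val modelBytes: ByteArray =
    resources.openRawResource(R.raw.mobilenet_v2_float).readBytes()
```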
@@ -40,7 +36,7 @@ Then open the sample application in Android Studio. To do this, open Android Stu
### Step 2. Build the sample application in Android Studio
-Select `Build-Make Project` in the top toolbar in Android Studio and check the projects has built successfully.
+Select `Build -> Make Project` in the top toolbar in Android Studio and check that the project has built successfully.
@@ -54,7 +50,7 @@ Connect your Android Device to the computer and select your device in the top-do
-Then Select `Run-Run app` and this will prompt the app to be installed on your device.
+Then select `Run -> Run app`; this will install the app on your device.
Now you can test and try by opening the app `ort_image_classifier` on your device. The app may request your permission for using the camera.
diff --git a/mobile/examples/image_classifications/android/app/build.gradle b/mobile/examples/image_classifications/android/app/build.gradle
index 96aa932952092..c234691ff6ad3 100644
--- a/mobile/examples/image_classifications/android/app/build.gradle
+++ b/mobile/examples/image_classifications/android/app/build.gradle
@@ -37,22 +37,23 @@ android {
dependencies {
// CameraX core library using the camera2 implementation
- def camerax_version = "1.0.0-rc03"
+ def camerax_version = "1.0.0"
// The following line is optional, as the core library is included indirectly by camera-camera2
implementation "androidx.camera:camera-camera2:${camerax_version}"
// If you want to additionally use the CameraX Lifecycle library
implementation "androidx.camera:camera-lifecycle:${camerax_version}"
// If you want to additionally use the CameraX View class
- implementation "androidx.camera:camera-view:1.0.0-alpha22"
+ implementation "androidx.camera:camera-view:1.0.0-alpha25"
implementation "org.jetbrains.kotlin:kotlin-stdlib-jdk8:$kotlin_version"
+ implementation "org.jetbrains.kotlinx:kotlinx-coroutines-android:1.3.9"
implementation 'androidx.core:core-ktx:1.3.2'
- implementation 'androidx.appcompat:appcompat:1.2.0'
+ implementation 'androidx.appcompat:appcompat:1.3.0'
implementation 'com.google.android.material:material:1.3.0'
implementation 'androidx.constraintlayout:constraintlayout:2.0.4'
testImplementation 'junit:junit:4.+'
androidTestImplementation 'androidx.test.ext:junit:1.1.2'
androidTestImplementation 'androidx.test.espresso:espresso-core:3.3.0'
- implementation(name: "onnxruntime-release", ext: "aar")
+ implementation 'com.microsoft.onnxruntime:onnxruntime-mobile:1.8.0'
}
\ No newline at end of file
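The dependency change above replaces the locally built `onnxruntime-release.aar` (and the `flatDir` repository it required) with the prebuilt `com.microsoft.onnxruntime:onnxruntime-mobile` package from Maven Central. A minimal sketch of the `ai.onnxruntime` API that the package exposes, assuming `modelBytes` holds an ORT-format model read from `res/raw/`:

```kotlin
import ai.onnxruntime.OrtEnvironment
import ai.onnxruntime.OrtSession

// Create a session from model bytes; this is the same
// OrtEnvironment.createSession call that MainActivity.kt uses below.
fun createSession(modelBytes: ByteArray): OrtSession {
    val env = OrtEnvironment.getEnvironment() // process-wide singleton environment
    return env.createSession(modelBytes)
}
```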
diff --git a/mobile/examples/image_classifications/android/app/src/main/java/ai/onnxruntime/example/imageclassifier/MainActivity.kt b/mobile/examples/image_classifications/android/app/src/main/java/ai/onnxruntime/example/imageclassifier/MainActivity.kt
index 6a188e49c5d77..524181f4cf881 100644
--- a/mobile/examples/image_classifications/android/app/src/main/java/ai/onnxruntime/example/imageclassifier/MainActivity.kt
+++ b/mobile/examples/image_classifications/android/app/src/main/java/ai/onnxruntime/example/imageclassifier/MainActivity.kt
@@ -15,12 +15,15 @@ import androidx.camera.lifecycle.ProcessCameraProvider
import androidx.core.app.ActivityCompat
import androidx.core.content.ContextCompat
import kotlinx.android.synthetic.main.activity_main.*
+import kotlinx.coroutines.*
+import java.lang.Runnable
import java.util.concurrent.ExecutorService
import java.util.concurrent.Executors
class MainActivity : AppCompatActivity() {
private val backgroundExecutor: ExecutorService by lazy { Executors.newSingleThreadExecutor() }
private val labelData: List<String> by lazy { readLabels() }
+ private val scope = CoroutineScope(Job() + Dispatchers.Main)
private var ortEnv: OrtEnvironment? = null
private var imageCapture: ImageCapture? = null
@@ -42,11 +45,7 @@ class MainActivity : AppCompatActivity() {
enable_quantizedmodel_toggle.setOnCheckedChangeListener { _, isChecked ->
enableQuantizedModel = isChecked
- imageAnalysis?.clearAnalyzer()
- imageAnalysis?.setAnalyzer(
- backgroundExecutor,
- ORTAnalyzer(createOrtSession(), ::updateUI)
- )
+ setORTAnalyzer()
}
}
@@ -73,9 +72,6 @@ class MainActivity : AppCompatActivity() {
imageAnalysis = ImageAnalysis.Builder()
.setBackpressureStrategy(ImageAnalysis.STRATEGY_KEEP_ONLY_LATEST)
.build()
- .also {
- it.setAnalyzer(backgroundExecutor, ORTAnalyzer(createOrtSession(), ::updateUI))
- }
try {
cameraProvider.unbindAll()
@@ -86,6 +82,8 @@ class MainActivity : AppCompatActivity() {
} catch (exc: Exception) {
Log.e(TAG, "Use case binding failed", exc)
}
+
+ setORTAnalyzer()
}, ContextCompat.getMainExecutor(this))
}
@@ -104,6 +102,7 @@ class MainActivity : AppCompatActivity() {
permissions: Array<String>,
grantResults: IntArray
) {
+ super.onRequestPermissionsResult(requestCode, permissions, grantResults)
if (requestCode == REQUEST_CODE_PERMISSIONS) {
if (allPermissionsGranted()) {
startCamera()
@@ -142,20 +141,33 @@ class MainActivity : AppCompatActivity() {
}
}
- // Read ort model into a ByteArray
- private fun readModel(): ByteArray {
+ // Read MobileNet V2 classification labels
+ private fun readLabels(): List<String> {
+ return resources.openRawResource(R.raw.imagenet_classes).bufferedReader().readLines()
+ }
+
+ // Read ort model into a ByteArray, run in background
+ private suspend fun readModel(): ByteArray = withContext(Dispatchers.IO) {
val modelID =
if (enableQuantizedModel) R.raw.mobilenet_v2_uint8 else R.raw.mobilenet_v2_float
- return resources.openRawResource(modelID).readBytes()
+ resources.openRawResource(modelID).readBytes()
}
- // Read MobileNet V2 classification labels
- private fun readLabels(): List<String> {
- return resources.openRawResource(R.raw.labels).bufferedReader().readLines()
+ // Create a new ORT session in background
+ private suspend fun createOrtSession(): OrtSession? = withContext(Dispatchers.Default) {
+ ortEnv?.createSession(readModel())
}
- private fun createOrtSession(): OrtSession? {
- return ortEnv?.createSession(readModel())
+ // Create a new ORT session and then change the ImageAnalysis.Analyzer
+ // This part is done in background to avoid blocking the UI
+ private fun setORTAnalyzer() {
+ scope.launch {
+ imageAnalysis?.clearAnalyzer()
+ imageAnalysis?.setAnalyzer(
+ backgroundExecutor,
+ ORTAnalyzer(createOrtSession(), ::updateUI)
+ )
+ }
}
companion object {
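The net effect of the `MainActivity.kt` changes is that model loading and session creation now run off the main thread, with the analyzer swapped back in on `Dispatchers.Main`. A stand-alone sketch of that coroutine pattern; the class and callback names here are illustrative, not part of the patch:

```kotlin
import kotlinx.coroutines.*

// Illustrative: heavy work (reading model bytes) runs on a background
// dispatcher, then the result is applied back on the main dispatcher.
class AnalyzerUpdater(private val loadModelBytes: () -> ByteArray) {
    private val scope = CoroutineScope(Job() + Dispatchers.Main)

    // Suspends on the IO dispatcher so the UI thread is never blocked
    private suspend fun readModel(): ByteArray =
        withContext(Dispatchers.IO) { loadModelBytes() }

    fun update(apply: (ByteArray) -> Unit) {
        scope.launch {              // launched on Dispatchers.Main
            val bytes = readModel() // suspends; the UI stays responsive
            apply(bytes)            // runs back on the main dispatcher
        }
    }
}
```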
diff --git a/mobile/examples/image_classifications/android/app/src/main/java/ai/onnxruntime/example/imageclassifier/ORTAnalyzer.kt b/mobile/examples/image_classifications/android/app/src/main/java/ai/onnxruntime/example/imageclassifier/ORTAnalyzer.kt
index fd5d422672381..3b0598da84622 100644
--- a/mobile/examples/image_classifications/android/app/src/main/java/ai/onnxruntime/example/imageclassifier/ORTAnalyzer.kt
+++ b/mobile/examples/image_classifications/android/app/src/main/java/ai/onnxruntime/example/imageclassifier/ORTAnalyzer.kt
@@ -92,6 +92,7 @@ internal class ORTAnalyzer(
val output = ortSession?.run(Collections.singletonMap(inputName, tensor))
output.use {
result.processTimeMs = SystemClock.uptimeMillis() - startTime
+ @Suppress("UNCHECKED_CAST")
val rawOutput = ((output?.get(0)?.value) as Array<FloatArray>)[0]
val probabilities = softMax(rawOutput)
result.detectedIndices = getTop3(probabilities)
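`softMax` and `getTop3` are defined elsewhere in `ORTAnalyzer.kt` and are not part of this hunk; a hedged sketch of what such post-processing helpers typically look like:

```kotlin
import kotlin.math.exp

// Illustrative only; the real definitions in ORTAnalyzer.kt may differ.
fun softMax(logits: FloatArray): FloatArray {
    val max = logits.maxOrNull() ?: 0f // shift by the max for numerical stability
    val exps = FloatArray(logits.size) { exp(logits[it] - max) }
    val sum = exps.sum()
    return FloatArray(exps.size) { exps[it] / sum }
}

// Indices of the three highest probabilities
fun getTop3(probabilities: FloatArray): List<Int> =
    probabilities.withIndex()
        .sortedByDescending { it.value }
        .take(3)
        .map { it.index }
```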
diff --git a/mobile/examples/image_classifications/android/app/src/main/res/raw/.gitignore b/mobile/examples/image_classifications/android/app/src/main/res/raw/.gitignore
index 14cb57e7efab9..d5bfe65956cd3 100644
--- a/mobile/examples/image_classifications/android/app/src/main/res/raw/.gitignore
+++ b/mobile/examples/image_classifications/android/app/src/main/res/raw/.gitignore
@@ -1,4 +1,4 @@
# model and label files
*.onnx
*.ort
-labels.txt
\ No newline at end of file
+imagenet_classes.txt
diff --git a/mobile/examples/image_classifications/android/build.gradle b/mobile/examples/image_classifications/android/build.gradle
index 808b31912347a..a29c188519f89 100644
--- a/mobile/examples/image_classifications/android/build.gradle
+++ b/mobile/examples/image_classifications/android/build.gradle
@@ -3,7 +3,7 @@ buildscript {
ext.kotlin_version = "1.3.72"
repositories {
google()
- jcenter()
+ mavenCentral()
}
dependencies {
classpath "com.android.tools.build:gradle:4.1.2"
@@ -17,8 +17,7 @@ buildscript {
allprojects {
repositories {
google()
- jcenter()
- flatDir{dirs 'libs'}
+ mavenCentral()
}
}