I am trying to integrate a TensorFlow Lite model into my Android app. I have a simple model that differentiates between cats and dogs: I downloaded the required dataset from Kaggle and used the Teachable Machine website to train the model, then downloaded the model as TensorFlow Lite with the quantized option selected. Below are the Gradle dependencies involved, followed by my Android code that runs detection.
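A minimal sketch of the build.gradle.kts dependencies this setup relies on; the task-vision artifact and its 0.4.2 version are taken from the stack trace further down, while the remaining versions are assumptions and may need adjusting:

dependencies {
    // TFLite Task Library (vision) delivered via Google Play services;
    // the 0.4.2 version appears in the stack trace below
    implementation("org.tensorflow:tensorflow-lite-task-vision-play-services:0.4.2")
    // Play services module providing TfLiteGpu for the GPU availability check
    // (version is an assumption)
    implementation("com.google.android.gms:play-services-tflite-gpu:16.1.0")
    // CameraX for the preview and analysis use cases (versions are assumptions)
    implementation("androidx.camera:camera-camera2:1.2.0")
    implementation("androidx.camera:camera-lifecycle:1.2.0")
    implementation("androidx.camera:camera-view:1.2.0")
}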
class ObjectDetectorHelper(
var threshold: Float = 0.5f,
var numThreads: Int = 2,
var maxResults: Int = 1,
var currentDelegate: Int = 0,
var currentModel: Int = 0,
val context: Context,
val objectDetectorListener: DetectorListener
) {
private val TAG = "ObjectDetectorHelper"
// For this example this needs to be a var so it can be reset on changes. If the ObjectDetector
// will not change, a lazy val would be preferable.
private var objectDetector: ObjectDetector? = null
private var gpuSupported = false
init {
TfLiteGpu.isGpuDelegateAvailable(context).onSuccessTask { gpuAvailable: Boolean ->
val optionsBuilder =
TfLiteInitializationOptions.builder()
if (gpuAvailable) {
optionsBuilder.setEnableGpuDelegateSupport(true)
}
TfLiteVision.initialize(context, optionsBuilder.build())
}.addOnSuccessListener {
objectDetectorListener.onInitialized()
}.addOnFailureListener {
objectDetectorListener.onError("TfLiteVision failed to initialize: "
+ it.message)
}
}
fun clearObjectDetector() {
objectDetector = null
}
// Initialize the object detector using current settings on the
// thread that is using it. CPU and NNAPI delegates can be used with detectors
// that are created on the main thread and used on a background thread, but
// the GPU delegate needs to be used on the thread that initialized the detector
private fun setupObjectDetector() {
if (!TfLiteVision.isInitialized()) {
Log.e(TAG, "setupObjectDetector: TfLiteVision is not initialized yet")
return
}
// Create the base options for the detector using the specified max results and score threshold
val optionsBuilder =
ObjectDetector.ObjectDetectorOptions.builder()
.setScoreThreshold(threshold)
.setMaxResults(maxResults)
// Set general detection options, including number of used threads
val baseOptionsBuilder = BaseOptions.builder().setNumThreads(numThreads)
// Use the specified hardware for running the model. Default to CPU
when (currentDelegate) {
DELEGATE_CPU -> {
// Default
}
DELEGATE_GPU -> {
if (gpuSupported) {
baseOptionsBuilder.useGpu()
} else {
objectDetectorListener.onError("GPU is not supported on this device")
}
}
DELEGATE_NNAPI -> {
baseOptionsBuilder.useNnapi()
}
}
optionsBuilder.setBaseOptions(baseOptionsBuilder.build())
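// Note: every model option below resolves to the same Teachable Machine
// export ("model.tflite") bundled in assets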
val modelName =
when (currentModel) {
MODEL_MOBILENETV1 -> "model.tflite"
MODEL_EFFICIENTDETV0 -> "model.tflite"
MODEL_EFFICIENTDETV1 -> "model.tflite"
MODEL_EFFICIENTDETV2 -> "model.tflite"
else -> "model.tflite"
}
try {
objectDetector =
ObjectDetector.createFromFileAndOptions(context, modelName, optionsBuilder.build())
} catch (e: Exception) {
objectDetectorListener.onError(
"Object detector failed to initialize. See error logs for details"
)
Log.e(TAG, "TFLite failed to load model with error: " + e.message)
}
}
fun detect(image: Bitmap, imageRotation: Int) {
Log.i("resultssss","9")
if (!TfLiteVision.isInitialized()) {
Log.e(TAG, "detect: TfLiteVision is not initialized yet")
return
}
Log.i("resultssss","10")
if (objectDetector == null) {
setupObjectDetector()
}
Log.i("resultssss","11")
// Inference time is the difference between the system time at the start and finish of the
// process
var inferenceTime = SystemClock.uptimeMillis()
Log.i("resultssss","12")
// Create preprocessor for the image.
// See https://www.tensorflow.org/lite/inference_with_metadata/
// lite_support#imageprocessor_architecture
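// Rot90Op(k) rotates counter-clockwise by k * 90 degrees; negating
// imageRotation / 90 converts CameraX's clockwise rotationDegrees into that convention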
val imageProcessor = ImageProcessor.Builder().add(Rot90Op(-imageRotation / 90)).build()
Log.i("resultssss","13")
// Preprocess the image and convert it into a TensorImage for detection.
val tensorImage = imageProcessor.process(TensorImage.fromBitmap(image))
Log.i("resultssss","14")
val results = objectDetector?.detect(tensorImage)
Log.i("resultssss","15")
inferenceTime = SystemClock.uptimeMillis() - inferenceTime
Log.i("resultssss","16")
objectDetectorListener.onResults(
results,
inferenceTime,
tensorImage.height,
tensorImage.width)
}
interface DetectorListener {
fun onInitialized()
fun onError(error: String)
fun onResults(
results: MutableList<Detection>?,
inferenceTime: Long,
imageHeight: Int,
imageWidth: Int
)
}
companion object {
const val DELEGATE_CPU = 0
const val DELEGATE_GPU = 1
const val DELEGATE_NNAPI = 2
const val MODEL_MOBILENETV1 = 0
const val MODEL_EFFICIENTDETV0 = 1
const val MODEL_EFFICIENTDETV1 = 2
const val MODEL_EFFICIENTDETV2 = 3
}
}
class MainActivity : AppCompatActivity(), ObjectDetectorHelper.DetectorListener {
private lateinit var cameraExecutor: ExecutorService
private var mCameraProvider: ProcessCameraProvider? = null
private lateinit var viewFinder: PreviewView
private lateinit var objectDetectorHelper: ObjectDetectorHelper
private lateinit var bitmapBuffer: Bitmap
override fun onCreate(savedInstanceState: Bundle?) {
super.onCreate(savedInstanceState)
setContentView(R.layout.activity_main)
viewFinder = findViewById(R.id.viewFinder)
objectDetectorHelper = ObjectDetectorHelper(
context = this,
objectDetectorListener = this
)
}
private fun setUpCamera() {
if (allPermissionsGranted()) {
startCamera()
}
}
private fun detectObjects(image: ImageProxy) {
Log.i("resultssss", "5")
// Copy out RGB bits to the shared bitmap buffer
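// ImageAnalysis is configured for OUTPUT_IMAGE_FORMAT_RGBA_8888, whose single
// plane matches the in-memory pixel layout of an ARGB_8888 Bitmap, so a direct
// buffer copy is safe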
image.use { bitmapBuffer.copyPixelsFromBuffer(image.planes[0].buffer) }
Log.i("resultssss", "6")
val imageRotation = image.imageInfo.rotationDegrees
Log.i("resultssss", "7")
// Pass Bitmap and rotation to the object detector helper for processing and detection
objectDetectorHelper.detect(bitmapBuffer, imageRotation)
Log.i("resultssss", "8")
}
private fun startCamera() {
val cameraProviderFuture = ProcessCameraProvider.getInstance(this)
cameraProviderFuture.addListener({
// Used to bind the lifecycle of cameras to the lifecycle owner
val cameraProvider: ProcessCameraProvider = cameraProviderFuture.get()
mCameraProvider = cameraProvider
// Preview
val surfacePreview = Preview.Builder()
.setTargetAspectRatio(AspectRatio.RATIO_4_3)
.setTargetRotation(viewFinder.display.rotation)
.build()
.also {
it.setSurfaceProvider(viewFinder.surfaceProvider)
}
val imageAnalyzer =
ImageAnalysis.Builder()
.setTargetAspectRatio(AspectRatio.RATIO_4_3)
.setTargetRotation(viewFinder.display.rotation)
.setBackpressureStrategy(ImageAnalysis.STRATEGY_KEEP_ONLY_LATEST)
.setOutputImageFormat(OUTPUT_IMAGE_FORMAT_RGBA_8888)
.build()
// The analyzer can then be assigned to the instance
.also {
Log.i("resultssss", "1")
it.setAnalyzer(cameraExecutor) { image ->
Log.i("resultssss", "2")
if (!::bitmapBuffer.isInitialized) {
Log.i("resultssss", "3")
// The image rotation and RGB image buffer are initialized only once
// the analyzer has started running
bitmapBuffer = Bitmap.createBitmap(
image.width,
image.height,
Bitmap.Config.ARGB_8888
)
}
Log.i("resultssss", "4")
detectObjects(image)
}
}
// Select back camera as a default
val cameraSelector = CameraSelector.DEFAULT_BACK_CAMERA
try {
// Unbind use cases before rebinding
cameraProvider.unbindAll()
// Bind use cases to camera
cameraProvider.bindToLifecycle(
this, cameraSelector, surfacePreview, imageAnalyzer
)
} catch (exc: Exception) {
Toast.makeText(this, exc.message, Toast.LENGTH_LONG).show()
}
}, ContextCompat.getMainExecutor(this))
}
private fun allPermissionsGranted() = REQUIRED_PERMISSIONS.all {
ContextCompat.checkSelfPermission(
baseContext, it
) == PackageManager.PERMISSION_GRANTED
}
override fun onDestroy() {
    super.onDestroy()
    objectDetectorHelper.clearObjectDetector()
    // cameraExecutor is lateinit and only created in onInitialized(), so guard
    // against an UninitializedPropertyAccessException if initialization never completed
    if (::cameraExecutor.isInitialized) cameraExecutor.shutdown()
}
companion object {
private const val REQUEST_CODE_PERMISSIONS = 10
private val REQUIRED_PERMISSIONS =
mutableListOf(
android.Manifest.permission.CAMERA
).toTypedArray()
}
override fun onRequestPermissionsResult(
    requestCode: Int, permissions: Array<String>, grantResults: IntArray
) {
super.onRequestPermissionsResult(requestCode, permissions, grantResults)
if (requestCode == REQUEST_CODE_PERMISSIONS) {
if (allPermissionsGranted()) {
setUpCamera()
} else {
Toast.makeText(
this,
"Permissions not granted by the user.",
Toast.LENGTH_SHORT
).show()
finish()
}
}
}
override fun onInitialized() {
    // Create the analyzer executor before any camera use case can reference it
    cameraExecutor = Executors.newSingleThreadExecutor()
    if (allPermissionsGranted()) {
        setUpCamera()
    } else {
        ActivityCompat.requestPermissions(
            this, REQUIRED_PERMISSIONS, REQUEST_CODE_PERMISSIONS
        )
    }
}
override fun onError(error: String) {
runOnUiThread { Toast.makeText(this, error, Toast.LENGTH_SHORT).show() }
}
override fun onResults(
results: MutableList<Detection>?,
inferenceTime: Long,
imageHeight: Int,
imageWidth: Int
) {
runOnUiThread {
    // firstOrNull() avoids the IndexOutOfBoundsException that get(0)
    // would throw when the detector returns an empty list
    val first = results?.firstOrNull()
    Log.i("resultssss", "${first?.categories} ${first?.boundingBox}")
}
}
}
The complete error log is as follows:
Error getting native address of native library: task_vision_jni_gms
java.lang.IllegalArgumentException: Error occurred when initializing ObjectDetector: Mobile SSD models are expected to have exactly 4 outputs, found 1
at org.tensorflow.lite.task.gms.vision.detector.ObjectDetector.initJniWithModelFdAndOptions(Native Method)
at org.tensorflow.lite.task.gms.vision.detector.ObjectDetector.zzb(Unknown Source:0)
at org.tensorflow.lite.task.gms.vision.detector.zzb.createHandle(org.tensorflow:tensorflow-lite-task-vision-play-services@@0.4.2:4)
at org.tensorflow.lite.task.core.TaskJniUtils$1.createHandle(TaskJniUtils.java:70)
at org.tensorflow.lite.task.core.TaskJniUtils.createHandleFromLibrary(TaskJniUtils.java:91)
at org.tensorflow.lite.task.core.TaskJniUtils.createHandleFromFdAndOptions(TaskJniUtils.java:66)
at org.tensorflow.lite.task.gms.vision.detector.ObjectDetector.createFromFileAndOptions(org.tensorflow:tensorflow-lite-task-vision-play-services@@0.4.2:2)
at com.affinidi.tfdemoone.ObjectDetectorHelper.setupObjectDetector(ObjectDetectorHelper.kt:104)
at com.affinidi.tfdemoone.ObjectDetectorHelper.detect(ObjectDetectorHelper.kt:121)
at com.affinidi.tfdemoone.MainActivity.detectObjects(MainActivity.kt:89)
at com.affinidi.tfdemoone.MainActivity.startCamera$lambda$4$lambda$3$lambda$2(MainActivity.kt:132)
at com.affinidi.tfdemoone.MainActivity.$r8$lambda$cwS3iJ069sufgGf-nT7H81EEGtQ(Unknown Source:0)
at com.affinidi.tfdemoone.MainActivity$$ExternalSyntheticLambda3.analyze(Unknown Source:2)
at androidx.camera.core.ImageAnalysis.lambda$setAnalyzer$2(ImageAnalysis.java:481)
at androidx.camera.core.ImageAnalysis$$ExternalSyntheticLambda2.analyze(Unknown Source:2)
at androidx.camera.core.ImageAnalysisAbstractAnalyzer.lambda$analyzeImage$0$androidx-camera-core-ImageAnalysisAbstractAnalyzer(ImageAnalysisAbstractAnalyzer.java:286)
at androidx.camera.core.ImageAnalysisAbstractAnalyzer$$ExternalSyntheticLambda1.run(Unknown Source:14)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1167)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:641)
at java.lang.Thread.run(Thread.java:920)
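The key line is the IllegalArgumentException: the Task Library's ObjectDetector expects an SSD-style detection model with exactly four output tensors (locations, classes, scores, and number of detections), while this model exposes a single output, which is what a Teachable Machine image classification export produces. As a sanity check, here is a minimal sketch that dumps the model's output tensors; it assumes the standalone org.tensorflow.lite Interpreter and the tensorflow-lite-support FileUtil helper are on the classpath, and that the model is bundled as assets/model.tflite:

import android.content.Context
import android.util.Log
import org.tensorflow.lite.Interpreter
import org.tensorflow.lite.support.common.FileUtil

// Sketch: log every output tensor of the bundled model so the
// "expected 4 outputs, found 1" claim can be verified directly
fun logModelOutputs(context: Context) {
    val modelBuffer = FileUtil.loadMappedFile(context, "model.tflite")
    Interpreter(modelBuffer).use { interpreter ->
        Log.i("ModelInspect", "output tensor count = ${interpreter.outputTensorCount}")
        for (i in 0 until interpreter.outputTensorCount) {
            val t = interpreter.getOutputTensor(i)
            Log.i(
                "ModelInspect",
                "output[$i] name=${t.name()} shape=${t.shape().contentToString()} type=${t.dataType()}"
            )
        }
    }
}

If this logs a single classification-shaped output (for example [1, 2] for the two classes), the model would likely need the Task Library's ImageClassifier rather than ObjectDetector.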
Full source code is here