Integrating TensorFlow Lite Model with Android Studio: Output Tensor Differences

I'm new to TensorFlow Lite. I have a model converted from YOLOv5 whose input and output tensors are shown in the image below, but I'm having trouble integrating it. I tried the sample code that Android Studio generates for the model, but I couldn't get it to work in my project. I've also looked at several examples on GitHub, but the output tensors in those examples seem different from my model's. Can you suggest an approach for integrating this model, or recommend any resources for guidance?

[Image: Android Studio's sample-code view for the model, showing its input and output tensors]

Exception

Type mismatch: inferred type is Triple<RectF, String, TensorBuffer> but Triple<RectF, String, Int> was expected
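
From the message, the generated DetectionResult class appears to return the score as a raw TensorBuffer rather than a typed value, so the Triple's third element no longer matches the listener's signature. If that is the case, I imagine something like this might pull the value out (untested; the scoreAsTensorBuffer accessor name is a guess based on the error, and the listener's third type would have to change from Int to Float):

// Untested sketch: TensorBuffer.getFloatArray() is from the TFLite Support library.
val scoreBuffer = detectionResult.scoreAsTensorBuffer // accessor name assumed from the error
val score: Float = scoreBuffer.floatArray[0]          // single confidence value
resultListener(Triple(location, category, score))     // listener type: Triple<RectF, String, Float>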

Code

class ObjectDetectionAnalyzer(
    private val context: Context,
    private val resultListener: (result: Triple<RectF, String, Int>) -> Unit
) : ImageAnalysis.Analyzer {

    private lateinit var bestFpMetadataSecondVersion: BestFpMetadataSecondVersion

    override fun analyze(imageProxy: ImageProxy) {

        // Note: this creates (and never closes) a new model instance on every frame;
        // instantiating it once outside analyze() would be cheaper.
        bestFpMetadataSecondVersion = BestFpMetadataSecondVersion.newInstance(context)

        // Creates inputs for reference.
        val tensorImage = TensorImage.fromBitmap(imageProxy.toBitmap())

        // Runs model inference and gets result.
        val outputs = bestFpMetadataSecondVersion.process(tensorImage)
        val detectionResult = outputs.detectionResultList[0]

        // Gets result from DetectionResult.
        val location = detectionResult.locationAsRectF
        val category = detectionResult.categoryAsString
        // Per the exception above, score ends up inferred as a TensorBuffer rather than an Int,
        // which makes the Triple below fail to type-check.
        val score = detectionResult.scoreAsInt

        resultListener(Triple(location, category, score))

        imageProxy.close()

    }

    private fun ImageProxy.toBitmap(): Bitmap {

        val yBuffer = planes[0].buffer // Y
        val uBuffer = planes[1].buffer // U
        val vBuffer = planes[2].buffer // V

        val ySize = yBuffer.remaining()
        val uSize = uBuffer.remaining()
        val vSize = vBuffer.remaining()

        // Note: this common snippet assumes the U/V planes are laid out so that copying
        // V then U yields NV21's interleaved VU data; row/pixel strides can differ per device.
        val nv21 = ByteArray(ySize + uSize + vSize)

        yBuffer.get(nv21, 0, ySize)
        vBuffer.get(nv21, ySize, vSize)
        uBuffer.get(nv21, ySize + vSize, uSize)

        val yuvImage = YuvImage(
            nv21,
            ImageFormat.NV21,
            this.width,
            this.height,
            null
        )

        val out = ByteArrayOutputStream()

        yuvImage.compressToJpeg(
            Rect(
                0,
                0,
                yuvImage.width,
                yuvImage.height
            ),
            100,
            out
        )

        val imageBytes = out.toByteArray()

        return BitmapFactory.decodeByteArray(
            imageBytes,
            0,
            imageBytes.size
        )

    }

}
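
For reference, this is roughly how I attach the analyzer to a CameraX ImageAnalysis use case (a sketch; the lifecycle binding and preview setup are omitted):

// Sketch of the CameraX wiring (use-case binding omitted).
val imageAnalysis = ImageAnalysis.Builder()
    .setBackpressureStrategy(ImageAnalysis.STRATEGY_KEEP_ONLY_LATEST)
    .build()

imageAnalysis.setAnalyzer(
    ContextCompat.getMainExecutor(context),
    ObjectDetectionAnalyzer(context) { (box, label, score) ->
        Log.d("Detection", "$label $score at $box")
    }
)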

App Gradle

plugins {
    id 'com.android.application'
    id 'org.jetbrains.kotlin.android'
}

android {
    namespace 'com.example.objectdetectionstudy'
    compileSdk 34

    defaultConfig {
        applicationId "com.example.objectdetectionstudy"
        minSdk 24
        targetSdk 34
        versionCode 1
        versionName "1.0"

        testInstrumentationRunner "androidx.test.runner.AndroidJUnitRunner"
    }

    buildTypes {
        release {
            minifyEnabled false
            proguardFiles getDefaultProguardFile('proguard-android-optimize.txt'), 'proguard-rules.pro'
        }
    }
    compileOptions {
        sourceCompatibility JavaVersion.VERSION_1_8
        targetCompatibility JavaVersion.VERSION_1_8
    }
    kotlinOptions {
        jvmTarget = '1.8'
    }
    buildFeatures {
        viewBinding = true
        mlModelBinding true
    }
}

dependencies {

    implementation 'androidx.core:core-ktx:1.12.0'
    implementation 'androidx.appcompat:appcompat:1.6.1'
    implementation 'com.google.android.material:material:1.10.0'
    implementation 'androidx.constraintlayout:constraintlayout:2.1.4'

    implementation 'androidx.camera:camera-core:1.2.3'
    implementation 'androidx.camera:camera-camera2:1.2.3'
    implementation 'androidx.camera:camera-lifecycle:1.2.3'
    implementation 'androidx.camera:camera-video:1.2.3'
    implementation 'androidx.camera:camera-view:1.2.3'
    implementation 'androidx.camera:camera-extensions:1.2.3'

    implementation 'org.tensorflow:tensorflow-lite-support:0.4.2'
    implementation 'org.tensorflow:tensorflow-lite-metadata:0.3.1'
    implementation 'org.tensorflow:tensorflow-lite-gpu:2.12.0'
    implementation 'org.tensorflow:tensorflow-lite:2.12.0'

}

I've explored the documentation and several open-source projects, but I couldn't find an example that handles output tensors shaped like mine.
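
In case it helps frame an answer: my understanding is that a standard YOLOv5 export for a 640x640 input produces a single [1, 25200, 85] float output (4 box values, objectness, then 80 COCO class scores per row), which would be decoded manually with the Interpreter API instead of the generated wrapper. A rough, untested sketch of what I think that decoding looks like (the model file name, the thresholds, and the assumption that coordinates are normalized to [0, 1] all depend on my export):

// Rough, untested sketch: run a standard YOLOv5 export by hand and decode
// its [1, 25200, 85] output (cx, cy, w, h, objectness, 80 class scores per row).
fun detectYolo(context: Context, bitmap: Bitmap): List<Triple<RectF, Int, Float>> {
    val interpreter = Interpreter(
        FileUtil.loadMappedFile(context, "best_fp_metadata_second_version.tflite") // name assumed
    )

    // Resize to the 640x640 input and scale 0..255 bytes to 0..1 floats.
    val imageProcessor = ImageProcessor.Builder()
        .add(ResizeOp(640, 640, ResizeOp.ResizeMethod.BILINEAR))
        .add(NormalizeOp(0f, 255f))
        .build()
    var tensorImage = TensorImage(DataType.FLOAT32)
    tensorImage.load(bitmap)
    tensorImage = imageProcessor.process(tensorImage)

    val output = Array(1) { Array(25200) { FloatArray(85) } }
    interpreter.run(tensorImage.buffer, output)

    val confThreshold = 0.25f
    val detections = mutableListOf<Triple<RectF, Int, Float>>()
    for (row in output[0]) {
        val objectness = row[4]
        if (objectness < confThreshold) continue

        // Best of the 80 class scores (indices 5..84).
        var bestClass = 0
        var bestScore = 0f
        for (c in 5 until 85) {
            if (row[c] > bestScore) { bestScore = row[c]; bestClass = c - 5 }
        }
        val score = objectness * bestScore
        if (score < confThreshold) continue

        // Convert centre/size (normalized, assumed) to pixel-space corners.
        val cx = row[0] * 640f; val cy = row[1] * 640f
        val w = row[2] * 640f;  val h = row[3] * 640f
        detections += Triple(RectF(cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2), bestClass, score)
    }
    interpreter.close()
    // Overlapping boxes would still need non-maximum suppression before display.
    return detections
}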
