I'm trying to integrate a custom tflite model on Android, but I get an error: Error getting native address of native library


I am trying to integrate a custom model for real-time sign-language detection using Kotlin, the CameraX library, and tflite, but I ran into this error:

Error getting native address of native library: task_vision_jni_gms
java.lang.RuntimeException: Error occurred when initializing ImageClassifier: Input tensor has type kTfLiteFloat32: it requires specifying NormalizationOptions metadata to preprocess input images.
at org.tensorflow.lite.task.gms.vision.classifier.ImageClassifier.initJniWithModelFdAndOptions(Native Method)
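
If I read the exception correctly, the Task Library refuses a float32 input tensor unless the model embeds NormalizationOptions metadata describing how to preprocess pixels. A quick way to check whether a converted model carries any metadata at all (a sketch assuming the tflite-support Python package is installed) is:

from tflite_support import metadata

# Raises a ValueError if the model contains no metadata at all
displayer = metadata.MetadataDisplayer.with_model_file('trained_model.tflite')
print(displayer.get_metadata_json())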

This is my model code before conversion to .tflite:
import os
import tensorflow as tf

# BASE_DIR points at the dataset root (defined earlier in the notebook)
TRAIN_DIR = os.path.join(BASE_DIR, 'train')
VAL_DIR = os.path.join(BASE_DIR, 'val')
TEST_DIR = os.path.join(BASE_DIR, 'test')

IMAGE_SIZE = (150, 150)
BATCH_SIZE = 32

train_dataset = tf.keras.utils.image_dataset_from_directory(
    TRAIN_DIR,
    labels='inferred',
    class_names=None,
    color_mode='rgb',
    batch_size=BATCH_SIZE,
    image_size=IMAGE_SIZE,
    label_mode='categorical',
    shuffle=True,
)

val_dataset = tf.keras.utils.image_dataset_from_directory(
    VAL_DIR,
    labels='inferred',
    class_names=None,
    color_mode='rgb',
    batch_size=BATCH_SIZE,
    image_size=IMAGE_SIZE,
    label_mode='categorical',
    shuffle=True,
)

test_dataset = tf.keras.utils.image_dataset_from_directory(
    TEST_DIR,
    labels='inferred',
    class_names=None,
    color_mode='rgb',
    batch_size=BATCH_SIZE,
    image_size=IMAGE_SIZE,
    label_mode='categorical',
    shuffle=True,
)

# Normalize the datasets: rescale pixels from [0, 255] to [0, 1]
normalization_layer = tf.keras.layers.Rescaling(1./255)
train_dataset = train_dataset.map(lambda x, y: (normalization_layer(x), y))
val_dataset = val_dataset.map(lambda x, y: (normalization_layer(x), y))
test_dataset = test_dataset.map(lambda x, y: (normalization_layer(x), y))

SHUFFLE_BUFFER_SIZE = 100
PREFETCH_BUFFER_SIZE = tf.data.AUTOTUNE

train_dataset_prepared = (train_dataset
                          .shuffle(SHUFFLE_BUFFER_SIZE)
                          .cache()
                          .prefetch(PREFETCH_BUFFER_SIZE))

val_dataset_prepared = (val_dataset
                        .shuffle(SHUFFLE_BUFFER_SIZE)
                        .cache()
                        .prefetch(PREFETCH_BUFFER_SIZE))

test_dataset_prepared = (test_dataset
                         .cache()
                         .prefetch(PREFETCH_BUFFER_SIZE))

from tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Flatten, Dropout
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam

# Define the CNN model
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=IMAGE_SIZE + (3,)))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(256, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(256, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(512, activation='relu'))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.5))
# Output layer: one unit per class
model.add(Dense(26, activation='softmax'))

model.compile(
    loss='categorical_crossentropy',
    optimizer=Adam(0.0001),
    metrics=['accuracy']
)
model.summary()

training_history = model.fit(
    train_dataset_prepared,
    validation_data=val_dataset_prepared,
    epochs=50
)

# Evaluate the model on the test dataset
test_loss, test_accuracy = model.evaluate(test_dataset_prepared)

print(f"Test loss: {test_loss}")
print(f"Test accuracy: {test_accuracy}")

# Convert the model and save the TFLite file
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
with open('trained_model.tflite', 'wb') as f:
    f.write(tflite_model)
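
The file above is saved without any metadata, which appears to be exactly what the exception complains about: a float32 input tensor with no NormalizationOptions. If I understand the docs correctly, metadata can be attached after conversion with the tflite-support metadata writers; a minimal sketch (labels.txt is a placeholder label file I would still need to create, one class name per line; mean 0 and std 255 reproduce the Rescaling(1./255) preprocessing used in training) would be:

from tflite_support.metadata_writers import image_classifier
from tflite_support.metadata_writers import writer_utils

# NormalizationOptions make the Task Library compute (pixel - mean) / std;
# mean=0, std=255 maps 8-bit pixels into [0, 1], matching Rescaling(1./255).
writer = image_classifier.MetadataWriter.create_for_inference(
    writer_utils.load_file('trained_model.tflite'),
    input_norm_mean=[0.0],
    input_norm_std=[255.0],
    label_file_paths=['labels.txt']  # placeholder: one label per line
)
writer_utils.save_file(writer.populate(), 'model.tflite')

I have not confirmed this is the complete fix, but it matches what the error message asks for.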

This is my image-classification helper code:
package com.example.isyara.util

import android.content.Context
import android.graphics.Bitmap
import android.os.Build
import android.os.SystemClock
import android.util.Log
import android.view.Surface
import androidx.camera.core.ImageProxy
import com.example.isyara.R
import com.google.android.gms.tflite.client.TfLiteInitializationOptions
import com.google.android.gms.tflite.gpu.support.TfLiteGpu
import org.tensorflow.lite.DataType
import org.tensorflow.lite.gpu.CompatibilityList
import org.tensorflow.lite.support.common.ops.CastOp
import org.tensorflow.lite.support.common.ops.NormalizeOp
import org.tensorflow.lite.support.image.ImageProcessor
import org.tensorflow.lite.support.image.TensorImage
import org.tensorflow.lite.support.image.ops.ResizeOp
import org.tensorflow.lite.task.core.BaseOptions
import org.tensorflow.lite.task.core.vision.ImageProcessingOptions
import org.tensorflow.lite.task.gms.vision.TfLiteVision
import org.tensorflow.lite.task.gms.vision.classifier.Classifications
import org.tensorflow.lite.task.gms.vision.classifier.ImageClassifier

class ImageClassifierHelper(
    var threshold: Float = 0.7f,
    var maxResults: Int = 1,
    val modelName: String = "model.tflite",
    val context: Context,
    val classifierListener: ClassifierListener?
) {
    private var imageClassifier: ImageClassifier? = null

    init {
        // Initialize TfLiteVision (Play Services), enabling the GPU delegate
        // when available, then build the classifier.
        TfLiteGpu.isGpuDelegateAvailable(context).onSuccessTask { gpuAvailable ->
            val optionsBuilder = TfLiteInitializationOptions.builder()
            if (gpuAvailable) {
                optionsBuilder.setEnableGpuDelegateSupport(true)
            }
            TfLiteVision.initialize(context, optionsBuilder.build())
        }.addOnSuccessListener {
            setupImageClassifier()
        }.addOnFailureListener {
            classifierListener?.onError(context.getString(R.string.tflitevision_is_not_initialized_yet))
        }
    }

    private fun setupImageClassifier() {
        val optionsBuilder = ImageClassifier.ImageClassifierOptions.builder()
            .setScoreThreshold(threshold)
            .setMaxResults(maxResults)

        val baseOptionsBuilder = BaseOptions.builder()

        if (CompatibilityList().isDelegateSupportedOnThisDevice) {
            baseOptionsBuilder.useGpu()
        } else if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O_MR1) {
            baseOptionsBuilder.useNnapi()
        } else {
            // Fall back to the CPU
            baseOptionsBuilder.setNumThreads(4)
        }

        optionsBuilder.setBaseOptions(baseOptionsBuilder.build())

        try {
            imageClassifier = ImageClassifier.createFromFileAndOptions(
                context,
                modelName,
                optionsBuilder.build()
            )
        } catch (e: IllegalStateException) {
            classifierListener?.onError(context.getString(R.string.image_classifier_failed))
            Log.e(TAG, e.message.toString())
        }
    }

    fun classifyImage(image: ImageProxy) {
        if (!TfLiteVision.isInitialized()) {
            val errorMessage = context.getString(R.string.tflitevision_is_not_initialized_yet)
            Log.e(TAG, errorMessage)
            classifierListener?.onError(errorMessage)
            return
        }

        if (imageClassifier == null) {
            setupImageClassifier()
        }

        // Resize to the model's 150x150 input, cast to float32, and apply
        // NormalizeOp(0, 1), which is an identity transform ((x - 0) / 1).
        val imageProcessor = ImageProcessor.Builder()
            .add(ResizeOp(150, 150, ResizeOp.ResizeMethod.BILINEAR))
            .add(CastOp(DataType.FLOAT32))
            .add(NormalizeOp(0f, 1f))
            .build()

        val tensorImage = imageProcessor.process(TensorImage.fromBitmap(toBitmap(image)))

        val imageProcessingOptions = ImageProcessingOptions.builder()
            .setOrientation(getOrientationFromRotation(image.imageInfo.rotationDegrees))
            .build()

        var inferenceTime = SystemClock.uptimeMillis()
        val results = imageClassifier?.classify(tensorImage, imageProcessingOptions)
        inferenceTime = SystemClock.uptimeMillis() - inferenceTime
        classifierListener?.onResults(
            results,
            inferenceTime
        )
    }

    private fun toBitmap(image: ImageProxy): Bitmap {
        val bitmapBuffer = Bitmap.createBitmap(
            image.width,
            image.height,
            Bitmap.Config.ARGB_8888
        )
        // Copy the RGBA plane into the bitmap; use {} also closes the ImageProxy,
        // so no extra close() call is needed.
        image.use { bitmapBuffer.copyPixelsFromBuffer(image.planes[0].buffer) }
        return bitmapBuffer
    }

    private fun getOrientationFromRotation(rotation: Int): ImageProcessingOptions.Orientation {
        return when (rotation) {
            Surface.ROTATION_270 -> ImageProcessingOptions.Orientation.BOTTOM_RIGHT
            Surface.ROTATION_180 -> ImageProcessingOptions.Orientation.RIGHT_BOTTOM
            Surface.ROTATION_90 -> ImageProcessingOptions.Orientation.TOP_LEFT
            else -> ImageProcessingOptions.Orientation.RIGHT_TOP
        }
    }

    interface ClassifierListener {
        fun onError(error: String)
        fun onResults(
            results: List<Classifications>?,
            inferenceTime: Long
        )
    }

    companion object {
        private const val TAG = "ImageClassifierHelper"
    }
}

This is my activity code that uses the helper:
package com.example.isyara.ui.translate

import android.os.Build
import android.os.Bundle
import android.util.Log
import android.view.WindowInsets
import android.view.WindowManager
import android.widget.Toast
import androidx.activity.enableEdgeToEdge
import androidx.appcompat.app.AppCompatActivity
import androidx.camera.core.CameraSelector
import androidx.camera.core.ImageAnalysis
import androidx.camera.core.Preview
import androidx.camera.core.resolutionselector.AspectRatioStrategy
import androidx.camera.core.resolutionselector.ResolutionSelector
import androidx.camera.lifecycle.ProcessCameraProvider
import androidx.core.content.ContextCompat
import androidx.core.view.ViewCompat
import androidx.core.view.WindowInsetsCompat
import com.example.isyara.R
import com.example.isyara.databinding.ActivityTranslateBinding
import com.example.isyara.util.ImageClassifierHelper
import org.tensorflow.lite.task.gms.vision.classifier.Classifications
import java.text.NumberFormat
import java.util.concurrent.Executors

class TranslateActivity : AppCompatActivity() {
    private lateinit var binding: ActivityTranslateBinding
    private var cameraSelector: CameraSelector = CameraSelector.DEFAULT_BACK_CAMERA
    private lateinit var imageClassifierHelper: ImageClassifierHelper

    override fun onCreate(savedInstanceState: Bundle?) {
        super.onCreate(savedInstanceState)
        enableEdgeToEdge()
        binding = ActivityTranslateBinding.inflate(layoutInflater)
        setContentView(binding.root)

        ViewCompat.setOnApplyWindowInsetsListener(findViewById(R.id.main)) { v, insets ->
            val systemBars = insets.getInsets(WindowInsetsCompat.Type.systemBars())
            v.setPadding(systemBars.left, systemBars.top, systemBars.right, systemBars.bottom)
            insets
        }
    }

    public override fun onResume() {
        super.onResume()
        hideSystemUI()
        startCamera()
    }

    private fun startCamera() {
        imageClassifierHelper = ImageClassifierHelper(
            context = this,
            classifierListener = object : ImageClassifierHelper.ClassifierListener {
                override fun onError(error: String) {
                    runOnUiThread {
                        Toast.makeText(this@TranslateActivity, error, Toast.LENGTH_SHORT).show()
                    }
                }

                override fun onResults(results: List<Classifications>?, inferenceTime: Long) {
                    runOnUiThread {
                        results?.let { it ->
                            if (it.isNotEmpty() && it[0].categories.isNotEmpty()) {
                                println(it)
                                val sortedCategories =
                                    it[0].categories.sortedByDescending { it?.score }
                                val displayResult =
                                    sortedCategories.joinToString("\n") {
                                        "${it.label} " + NumberFormat.getPercentInstance()
                                            .format(it.score).trim()
                                    }
                                binding.tvResult.text = displayResult
                            } else {
                                binding.tvResult.text = ""
                            }
                        }
                    }
                }
            }
        )

        val cameraProviderFuture = ProcessCameraProvider.getInstance(this)

        cameraProviderFuture.addListener({
            val resolutionSelector = ResolutionSelector.Builder()
                .setAspectRatioStrategy(AspectRatioStrategy.RATIO_16_9_FALLBACK_AUTO_STRATEGY)
                .build()
            // Output RGBA_8888 frames so the helper's toBitmap() can copy plane 0 directly
            val imageAnalyzer = ImageAnalysis.Builder()
                .setResolutionSelector(resolutionSelector)
                .setTargetRotation(binding.cameraView.display.rotation)
                .setBackpressureStrategy(ImageAnalysis.STRATEGY_KEEP_ONLY_LATEST)
                .setOutputImageFormat(ImageAnalysis.OUTPUT_IMAGE_FORMAT_RGBA_8888)
                .build()
            imageAnalyzer.setAnalyzer(Executors.newSingleThreadExecutor()) { image ->
                imageClassifierHelper.classifyImage(image)
            }

            val cameraProvider: ProcessCameraProvider = cameraProviderFuture.get()
            val preview = Preview.Builder().build().also {
                it.setSurfaceProvider(binding.cameraView.surfaceProvider)
            }
            try {
                cameraProvider.unbindAll()
                cameraProvider.bindToLifecycle(
                    this,
                    cameraSelector,
                    preview,
                    imageAnalyzer
                )
            } catch (exc: Exception) {
                Toast.makeText(
                    this@TranslateActivity,
                    "Failed to start the camera.",
                    Toast.LENGTH_SHORT
                ).show()
                Log.e(TAG, "startCamera: ${exc.message}")
            }
        }, ContextCompat.getMainExecutor(this))
    }

    private fun hideSystemUI() {
        @Suppress("DEPRECATION") if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.R) {
            window.insetsController?.hide(WindowInsets.Type.statusBars())
        } else {
            window.setFlags(
                WindowManager.LayoutParams.FLAG_FULLSCREEN,
                WindowManager.LayoutParams.FLAG_FULLSCREEN
            )
        }
        supportActionBar?.hide()
    }

    companion object {
        private const val TAG = "TranslateActivity"
    }
}

This is the layout from TranslateActivity (the XML markup was stripped when the post was submitted):
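Since the markup did not survive, here is a rough reconstruction from what the binding code references: a root view with id main, a PreviewView with id cameraView, and a TextView with id tvResult. The container type and all attributes below are my guesses, not the original file:

<?xml version="1.0" encoding="utf-8"?>
<!-- Reconstructed sketch: only the three ids (main, cameraView, tvResult)
     are known from the Kotlin code; everything else is a guess. -->
<FrameLayout xmlns:android="http://schemas.android.com/apk/res/android"
    android:id="@+id/main"
    android:layout_width="match_parent"
    android:layout_height="match_parent">

    <androidx.camera.view.PreviewView
        android:id="@+id/cameraView"
        android:layout_width="match_parent"
        android:layout_height="match_parent" />

    <TextView
        android:id="@+id/tvResult"
        android:layout_width="wrap_content"
        android:layout_height="wrap_content"
        android:layout_gravity="bottom|center_horizontal" />

</FrameLayout>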
More details here: https://stackoverflow.com/questions/792 ... or-getting