I'm new to computer vision and running into some trouble. I have a PyTorch segmentation model, which I tried to convert to TFLite with model.export(format="tflite"). I then dropped the converted model into the Flutter code below.
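The snippet starts mid-class, and the declarations it depends on (_interpreter, _isModelLoaded, inputSize, numChannels, _loadModel) are not included in the post. Here is a minimal sketch of what they could look like with the tflite_flutter package; the class name, asset path, and 320-pixel input size are assumptions, not values from the original:

import 'dart:math' as math;
import 'dart:typed_data';

import 'package:flutter/material.dart';
import 'package:image/image.dart' as img;
import 'package:image_picker/image_picker.dart';
import 'package:tflite_flutter/tflite_flutter.dart';

// Reconstructed wrapper: the original post starts mid-class, so the class
// name, asset path, and input size below are assumptions.
class SegmentationAnalyzer {
  Interpreter? _interpreter;
  bool _isModelLoaded = false;
  static const int inputSize = 320; // assumed; must match the exported model
  static const int numChannels = 3;

  Future<void> _loadModel() async {
    if (_isModelLoaded) return;
    // Asset path is an assumption; it must be declared under assets in pubspec.yaml.
    _interpreter = await Interpreter.fromAsset('assets/model.tflite');
    _isModelLoaded = true;
  }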
  Future<void> analyzeImage(BuildContext context, XFile? selectedImage) async {
    if (selectedImage == null) {
      ScaffoldMessenger.of(context).showSnackBar(
        const SnackBar(content: Text('Select an image first')),
      );
      return;
    }
    try {
      await _loadModel();
      final Uint8List imageBytes = await selectedImage.readAsBytes();
      print('Processing image with TFLite...');
      print('Image bytes length: ${imageBytes.length}');
      final processedInput = await _preprocessImage(imageBytes);
      final segmentationMask = await _runInference(processedInput);
      final processedImage = await _postprocessResults(imageBytes, segmentationMask);
      _showImageDialog(context, processedImage);
    } catch (e) {
      print('Error in analyzeImage: $e');
      ScaffoldMessenger.of(context).showSnackBar(
        SnackBar(content: Text('Error: ${e.toString()}')),
      );
    }
  }
  Future<List<List<List<double>>>> _preprocessImage(Uint8List imageBytes) async {
    img.Image? image = img.decodeImage(imageBytes);
    if (image == null) throw Exception('Failed to decode image');
    img.Image resizedImage = img.copyResize(image, width: inputSize, height: inputSize);
    // Convert to normalized float values (0-1 range), in HWC order.
    final input = List.generate(
      inputSize,
      (y) => List.generate(
        inputSize,
        (x) => List.generate(numChannels, (c) {
          // image v4 API: getPixel returns a Pixel with r/g/b accessors.
          final img.Pixel pixel = resizedImage.getPixel(x, y);
          switch (c) {
            case 0: return pixel.r / 255.0; // Red
            case 1: return pixel.g / 255.0; // Green
            case 2: return pixel.b / 255.0; // Blue
            default: return 0.0;
          }
        }),
      ),
    );
    return input;
  }
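  // NOTE (assumption, not from the original post): dividing by 255 only
  // matches models trained on 0-1 inputs. torchvision-style PyTorch models
  // usually expect ImageNet mean/std normalization instead; if that applies
  // here, each channel value would be computed like this:
  static const List<double> _imagenetMean = [0.485, 0.456, 0.406];
  static const List<double> _imagenetStd = [0.229, 0.224, 0.225];

  double _normalizeImagenet(num value, int c) =>
      (value / 255.0 - _imagenetMean[c]) / _imagenetStd[c];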
  Future<List<List<List<double>>>> _runInference(List<List<List<double>>> input) async {
    if (_interpreter == null) throw Exception('Model not loaded');
    // Prepare input tensor (add batch dimension -> [1, H, W, C])
    var inputTensor = [input];
    // Prepare output tensor - adjust dimensions to match the model's
    // reported output shape (see _debugPrintTensorShapes below)
    var outputTensor = List.generate(
      1, // batch size
      (b) => List.generate(
        inputSize, // height
        (h) => List.generate(
          inputSize, // width
          (w) => List.filled(1, 0.0), // number of classes/channels - adjust if needed
        ),
      ),
    );
    // Run inference
    _interpreter!.run(inputTensor, outputTensor);
    return outputTensor[0]; // Remove batch dimension
  }
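  // Debugging sketch: ask the interpreter for its actual tensor shapes rather
  // than assuming them. A PyTorch export can keep NCHW layout ([1, 3, H, W])
  // instead of TFLite's usual NHWC ([1, H, W, 3]), in which case both the
  // input nesting above and the hardcoded output buffer are wrong.
  void _debugPrintTensorShapes() {
    final inTensor = _interpreter!.getInputTensor(0);
    final outTensor = _interpreter!.getOutputTensor(0);
    print('Input shape: ${inTensor.shape}, type: ${inTensor.type}');
    print('Output shape: ${outTensor.shape}, type: ${outTensor.type}');
  }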
  Future<Uint8List> _postprocessResults(Uint8List originalImageBytes, List<List<List<double>>> segmentationMask) async {
    // Decode original image
    img.Image? originalImage = img.decodeImage(originalImageBytes);
    if (originalImage == null) throw Exception('Failed to decode original image');
    // Create segmentation overlay; 4 channels so the overlay keeps its alpha
    // (the image v4 constructor defaults to 3 channels, which drops it)
    img.Image overlayImage = img.Image(width: originalImage.width, height: originalImage.height, numChannels: 4);
    // Resize segmentation mask to match original image size (nearest neighbor)
    for (int y = 0; y < originalImage.height; y++) {
      for (int x = 0; x < originalImage.width; x++) {
        // Map coordinates to segmentation mask
        int maskX = (x * inputSize / originalImage.width).round().clamp(0, inputSize - 1);
        int maskY = (y * inputSize / originalImage.height).round().clamp(0, inputSize - 1);
        // Get segmentation probability (assuming single-class output)
        double probability = segmentationMask[maskY][maskX][0];
        // Create colored overlay based on probability
        if (probability > 0.5) { // Threshold for segmentation
          // Red overlay for segmented areas (teeth/orthodontic regions)
          overlayImage.setPixel(x, y, img.ColorRgba8(255, 0, 0, (probability * 128).round()));
        } else {
          // Transparent for non-segmented areas
          overlayImage.setPixel(x, y, img.ColorRgba8(0, 0, 0, 0));
        }
      }
    }
    // Blend original image with overlay
    img.Image result = img.compositeImage(originalImage, overlayImage);
    // Encode back to bytes
    return Uint8List.fromList(img.encodePng(result));
  }
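  // Hypothetical fix (assumption: the converted model emits raw logits rather
  // than probabilities). If so, thresholding at 0.5 only makes sense after a
  // sigmoid; uses `import 'dart:math' as math;` (included above).
  double _sigmoid(double logit) => 1.0 / (1.0 + math.exp(-logit));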
  void _showImageDialog(BuildContext context, Uint8List imageBytes) {
    showDialog(
      context: context,
      builder: (BuildContext context) {
        return AlertDialog(
          title: const Text('Orthodontic Analysis Result'),
          content: SingleChildScrollView(
            child: Column(
              mainAxisSize: MainAxisSize.min,
              children: [
                Image.memory(imageBytes),
                const SizedBox(height: 16),
                const Text(
                  'Red areas indicate detected orthodontic regions',
                  style: TextStyle(fontSize: 12, color: Colors.grey),
                ),
              ],
            ),
          ),
          actions: [
            TextButton(
              onPressed: () => Navigator.of(context).pop(),
              child: const Text('Close'),
            ),
          ],
        );
      },
    );
  }
  void dispose() {
    _interpreter?.close();
    _interpreter = null;
    _isModelLoaded = false;
  }
}
analyzeImage is triggered by pressing a button. Any ideas as to what is wrong with this code?
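One way to narrow this down is to inspect the raw values the interpreter produces before any postprocessing. A minimal sketch, assuming segmentationMask is the value returned by _runInference:

// Sanity check: all zeros suggests a broken conversion or wrong input layout;
// values far outside [0, 1] suggest logits that still need a sigmoid.
double lo = double.infinity, hi = double.negativeInfinity;
for (final row in segmentationMask) {
  for (final pixel in row) {
    final v = pixel[0];
    if (v < lo) lo = v;
    if (v > hi) hi = v;
  }
}
print('Mask value range: [$lo, $hi]');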
More details here: https://stackoverflow.com/questions/796 ... e-and-bein (PyTorch segmentation model not working after conversion to TFLite and deployment on Android).