Anonymous
Cannot run FaceNet or machine learning in React Native.
Post
Anonymous » 23 Jul 2024, 03:19
This is my code for the AttendanceScreen. When I run it, I get "Error detecting face: [TypeError: Network request failed]" and "Error confirming photo: [TypeError: Network request failed]".
Code: Select all
import React, { useEffect, useState, useRef } from 'react';
import { Text, View, Image, Button, StyleSheet, Alert } from 'react-native';
import { Camera, useCameraDevice } from 'react-native-vision-camera';
import RNFS from 'react-native-fs';
import '@tensorflow/tfjs-react-native';
import * as tf from '@tensorflow/tfjs';
import { fetch, decodeJpeg } from '@tensorflow/tfjs-react-native';
const AbsensiScreen = () => {
const [cameraPermission, setCameraPermission] = useState(null);
const device = useCameraDevice('front');
const camera = useRef(null);
const [capturedPhoto, setCapturedPhoto] = useState(null);
const [showPreview, setShowPreview] = useState(false);
const checkCameraPermission = async () => {
const permission = await Camera.requestCameraPermission();
setCameraPermission(permission === 'authorized');
const status = Camera.getCameraPermissionStatus();
console.log('Camera permission status:', status);
if (status === 'granted') {
setCameraPermission(true);
} else {
setCameraPermission(false);
}
};
useEffect(() => {
checkCameraPermission();
}, []);
if (cameraPermission === null) {
return <Text>Checking camera permission...</Text>;
} else if (!cameraPermission) {
return <Text>Camera permission not granted</Text>;
}
if (!device) {
return <Text>No camera device available</Text>;
}
const takePhoto = async () => {
try {
if (!camera.current) {
console.error('Camera reference not available.');
return;
}
const photo = await camera.current.takePhoto();
setCapturedPhoto(photo.path);
setShowPreview(true);
} catch (error) {
console.error('Error capturing photo:', error);
}
};
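// Wait for the TFJS backend to finish initializing before loading or running the model.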
async function loadBackend() {
await tf.ready();
return tf.getBackend();
}
const loadFaceNetModel = async () => {
loadBackend()
const modelUrl = require('../assets/model/model.json');
const model = await tf.loadGraphModel(modelUrl);
return model;
};
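// Decode the captured photo and run FaceNet on it to get an embedding tensor.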
const detectFace = async (photoPath) => {
try {
const imageData = await fetch(`file://${photoPath}`, {}, { isBinary: true }).then((res) => res.arrayBuffer());
const imageTensor = decodeJpeg(new Uint8Array(imageData));
const model = await loadFaceNetModel();
const embeddings = model.predict(imageTensor.expandDims(0));
return embeddings;
} catch (error) {
console.error('Error detecting face:', error);
return null;
}
};
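// Build an embedding for every registered selfie in Pictures/absensi; the file name acts as the user ID.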
const loadKnownFaces = async () => {
const folderPath = `${RNFS.PicturesDirectoryPath}/absensi`;
const files = await RNFS.readDir(folderPath);
const knownFaces = [];
for (const file of files) {
if (file.isFile()) {
const imageData = await fetch(`file://${file.path}`, {}, { isBinary: true }).then((res) => res.arrayBuffer());
const imageTensor = decodeJpeg(new Uint8Array(imageData));
const model = await loadFaceNetModel();
const embeddings = model.predict(imageTensor.expandDims(0));
knownFaces.push({
id: file.name.split('.')[0], // Extract the name without extension
embeddings: embeddings
});
}
}
return knownFaces;
};
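// Return the ID of the closest registered embedding, or null if nothing is within the threshold.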
const compareFaces = (predictions, knownFaces) => {
let bestMatch = null;
let bestDistance = Infinity;
for (const knownFace of knownFaces) {
const distance = tf.norm(predictions.sub(knownFace.embeddings), 'euclidean').dataSync()[0];
if (distance < bestDistance) {
bestDistance = distance;
bestMatch = knownFace.id;
}
}
// Threshold for similarity
if (bestDistance < 0.6) { // You may need to adjust this threshold
return bestMatch;
} else {
return null;
}
};
const confirmPhoto = async () => {
try {
if (!capturedPhoto) {
Alert.alert("Error", "No photo captured.");
return;
}
const predictions = await detectFace(capturedPhoto);
const knownFaces = await loadKnownFaces();
const match = compareFaces(predictions, knownFaces);
if (match) {
Alert.alert("Success", `Face recognized successfully! ID: ${match}`);
} else {
Alert.alert("Failed", "Face not recognized.");
}
setShowPreview(false);
setCapturedPhoto(null);
} catch (error) {
console.error('Error confirming photo:', error);
Alert.alert("Error", "Failed to confirm photo.");
}
};
const retakePhoto = () => {
setCapturedPhoto(null);
setShowPreview(false);
};
return (
<View style={styles.container}>
{showPreview ? (
<View style={styles.previewContainer}>
{capturedPhoto && (
<Image source={{ uri: `file://${capturedPhoto}` }} style={styles.capturedImage} />
)}
<View style={styles.buttonContainer}>
<Button title="Retake" onPress={retakePhoto} />
<Button title="Confirm" onPress={confirmPhoto} />
</View>
</View>
) : (
<>
<Camera ref={camera} style={styles.camera} device={device} isActive={true} photo={true} />
<Button title="Take Photo" onPress={takePhoto} />
</>
)}
</View>
);
};
const styles = StyleSheet.create({
container: {
flex: 1,
},
camera: {
flex: 1,
},
previewContainer: {
position: 'absolute',
bottom: 0,
left: 0,
right: 0,
backgroundColor: 'rgba(0, 0, 0, 0.7)',
padding: 20,
alignItems: 'center',
},
capturedImage: {
width: 200,
height: 200,
marginBottom: 20,
borderRadius: 10,
},
buttonContainer: {
flexDirection: 'row',
justifyContent: 'space-evenly',
width: '100%',
},
});
export default AbsensiScreen;
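A note on the errors themselves: both [TypeError: Network request failed] messages come from the fetch(...) calls in detectFace and loadKnownFaces, and fetching local file:// paths is a common source of that error on Android. Below is a minimal, untested sketch of an alternative that skips the network layer and reads the JPEG bytes directly with react-native-fs (already imported above); the helper name imageToTensor is only an illustration.

Code: Select all
// Sketch (untested): read the JPEG from disk with react-native-fs instead of
// the tfjs-react-native fetch helper, then decode the bytes into a tensor.
const imageToTensor = async (photoPath) => {
  // Read the file as a base64 string (no network request involved).
  const base64 = await RNFS.readFile(photoPath, 'base64');
  // Turn the base64 string back into raw JPEG bytes.
  const rawBytes = new Uint8Array(tf.util.encodeString(base64, 'base64').buffer);
  // Decode into an RGB tensor of shape [height, width, 3].
  return decodeJpeg(rawBytes);
};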
I want to use the FaceNet model.json for face recognition based on the data that is saved when a user registers their face by taking a selfie, and display the output as an ID, which is the file name.
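On the model side, it is not clear that tf.loadGraphModel(require('../assets/model/model.json')) can resolve a bundled asset by itself; @tensorflow/tfjs-react-native ships a bundleResourceIO handler for exactly that case. A rough sketch follows: the shard name group1-shard1of1.bin is only a placeholder for whatever .bin file(s) came with the model, and Metro has to be configured (resolver.assetExts including 'bin') so the shard can be required.

Code: Select all
// Sketch (untested): load the FaceNet graph model from assets bundled with the
// app via bundleResourceIO instead of passing require(...) straight to
// tf.loadGraphModel.
import * as tf from '@tensorflow/tfjs';
import { bundleResourceIO } from '@tensorflow/tfjs-react-native';

const modelJson = require('../assets/model/model.json');
// Placeholder name - use the weight shard(s) that ship with model.json.
const modelWeights = require('../assets/model/group1-shard1of1.bin');

let cachedModel = null;

const loadFaceNetModel = async () => {
  if (cachedModel) return cachedModel; // load once, reuse for every photo
  await tf.ready();
  cachedModel = await tf.loadGraphModel(bundleResourceIO(modelJson, modelWeights));
  return cachedModel;
};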
More details here:
https://stackoverflow.com/questions/78781135/cannot-run-facenet-or-machine-learning-in-react-native
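One more detail that may matter once the loading errors are out of the way: FaceNet-style embeddings are normally L2-normalized before the Euclidean distance is compared against a threshold such as 0.6; without that, the 0.6 in compareFaces has no fixed scale. A small sketch of that normalization (the [1, 128] shape is an assumption about the model's output):

Code: Select all
// Sketch: L2-normalize embeddings so Euclidean distances have a predictable
// range before thresholding. Assumes `embedding` is the tensor returned by
// model.predict(), e.g. shape [1, 128].
const normalizeEmbedding = (embedding) =>
  tf.tidy(() => embedding.div(embedding.norm('euclidean')));

// Distance between two normalized embeddings; intermediates are cleaned up by tf.tidy.
const embeddingDistance = (a, b) =>
  tf.tidy(() => a.sub(b).norm('euclidean').dataSync()[0]);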