Tensorflow Keras model won't work with Tkinter GUI


Post by Guest »

I built a TensorFlow facial-expression recognition model and a Tkinter GUI to run it in. The GUI works fine on its own, but as soon as I try to use the model inside the GUI, the webcam feed freezes and all of the buttons for uploading videos and images stop responding. I don't know what I did wrong.
This is the model and the GUI combined:

Code: Select all

import tensorflow as tf
import tkinter as tk
from tkinter import filedialog
from tkinter import font
from PIL import Image, ImageTk
import cv2
import numpy as np
import urllib.request

print("OpenCV version:", cv2.__version__)
print("NumPy version:", np.__version__)

face_cascade_url = "https://raw.githubusercontent.com/opencv/opencv/master/data/haarcascades/haarcascade_frontalface_default.xml"
face_cascade_name = "haarcascade_frontalface_default.xml"
urllib.request.urlretrieve(face_cascade_url, face_cascade_name)
facec = cv2.CascadeClassifier(face_cascade_name)

print("check1")

class FacialExpressionModel:
    EMOTIONS_LIST = ["Angry", "Disgusted", "Fearful", "Happy", "Neutral", "Sad", "Surprised"]

    def __init__(self, model_file):
        self.model = tf.keras.models.load_model(model_file)
        self.model.make_predict_function()

    def predict_emotion(self, img):
        gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray_img, 1.3, 5)
        emotions = []

        for (x, y, w, h) in faces:
            roi = gray_img[y:y + h, x:x + w]
            roi = cv2.resize(roi, (48, 48))
            roi = np.expand_dims(roi, axis=0)
            roi = np.expand_dims(roi, axis=-1)
            preds = self.model.predict(roi)
            emotion = FacialExpressionModel.EMOTIONS_LIST[np.argmax(preds)]
            emotions.append(emotion)

        return emotions
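
    # NOTE: __init__ and predict_emotion are each defined a second time below; in a Python
    # class body the later definition of a name replaces the earlier one, so at runtime only
    # the JSON/weights-based loader and the single-array predict_emotion(img) are in effect
    # (and model_from_json is never imported in this script).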

    def __init__(self, model_json_file, model_weights_file):
        # load model from JSON file
        with open(model_json_file, "r") as json_file:
            loaded_model_json = json_file.read()
            self.loaded_model = model_from_json(loaded_model_json)
        # load weights into the new model
        self.loaded_model.load_weights(model_weights_file)
        self.loaded_model.make_predict_function()

    def predict_emotion(self, img):
        self.preds = self.loaded_model.predict(img)
        return FacialExpressionModel.EMOTIONS_LIST[np.argmax(self.preds)]

class VideoCamera:
    def __init__(self, root):
        self.video = cv2.VideoCapture(0)
        self.root = root
        self.canvas = tk.Canvas(self.root, width=377, height=377)
        self.canvas.place(x=10, y=10)
        self.running = True
        self.canvas.pack()
        self.uploaded_image = None
        self.uploaded_video = None

        self.model = FacialExpressionModel("C:/New folder/face_emotion_net.h5")

        self.create_widgets()

        # Start updating the video feed
        self.update_video_feed()

    def create_widgets(self):
        self.instructions_button = tk.Button(self.root, text="Instructions", command=self.showInstructions)
        self.instructions_button.place(x=400, y=10, width=191, height=61)

        self.start_stop_button = tk.Button(self.root, text="Start/Stop Webcam", command=self.startStopWebcam)
        self.start_stop_button.place(x=400, y=260, width=200, height=50)

        self.upload_video_button = tk.Button(self.root, text="Upload Video", command=self.uploadVideo)
        self.upload_video_button.place(x=400, y=80, width=88, height=70)

        self.upload_image_button = tk.Button(self.root, text="Upload Image", command=self.uploadImage)
        self.upload_image_button.place(x=500, y=80, width=90, height=70)

        self.message_label = tk.Label(self.root, text="If program is to work, please turn on\n webcam feed or upload a photo/video.")
        self.message_label.place(x=390, y=160)

    def showInstructions(self):
        # Create a new Toplevel window
        instructions_window = tk.Toplevel(self.root)
        instructions_window.title("Instructions")

        # Define instructions text
        instructions_text = "How To Use This Program:\n1. This is a deep learning facial expression recogition system that detects one of 7 emotions: Angry, Disgusted, Fearful, Happy, Neutral, Sad, and Surprised. \n2.  Either turn on the webcam feed or upload a photo/video.\n3. Press 'Start/Stop Webcam' to start or stop the webcam feed.\n4. Use 'Upload Video' button to upload a video file.\n5. Use 'Upload Image' button to upload an image file.\n5.  Follow the facial expression recognition in the main window and have fun!"

        # Add Label widget to display instructions text
        instructions_label = tk.Label(instructions_window, text=instructions_text)
        instructions_label.pack(padx=10, pady=10, anchor="center")

    def startStopWebcam(self):
        if self.running:
            self.video.release()
            self.running = False
        else:
            self.video = cv2.VideoCapture(0)
            self.running = True

    def uploadVideo(self):
        video_path = filedialog.askopenfilename(filetypes=[("Video files", "*.mp4;*.avi;*.mkv")])
        if video_path:
            self.uploaded_video = cv2.VideoCapture(video_path)
            self.video = None
            self.uploaded_image = None

    def uploadImage(self):
        image_path = filedialog.askopenfilename(filetypes=[("Image files", "*.jpg;*.png;*.gif;*.jfif")])
        if image_path:
            self.uploaded_image = Image.open(image_path)
            self.video = None
            self.uploaded_video = None

    def get_frame(self):
        if self.uploaded_image is not None:
            fr = np.array(self.uploaded_image)
            fr = cv2.resize(fr, (377, 377))
        elif self.uploaded_video is not None:
            ret, fr = self.uploaded_video.read()
            if not ret:
                return np.zeros((377, 377, 3), dtype=np.uint8)
            fr = cv2.resize(fr, (377, 377))
        elif self.video is not None:
            ret, fr = self.video.read()
            if not ret:
                return np.zeros((377, 377, 3), dtype=np.uint8)
            fr = cv2.resize(fr, (377, 377))
        else:
            return np.zeros((377, 377, 3), dtype=np.uint8)

        gray_fr = cv2.cvtColor(fr, cv2.COLOR_BGR2GRAY)
        faces = facec.detectMultiScale(gray_fr, 1.3, 5)

        for (x, y, w, h) in faces:
            fc = gray_fr[y:y + h, x:x + w]
            roi = cv2.resize(fc, (48, 48))
            pred = self.model.predict_emotion(roi[np.newaxis, :, :, np.newaxis])
            emotion = FacialExpressionModel.EMOTIONS_LIST[np.argmax(pred)]

            cv2.putText(fr, emotion, (x, y), font, 1, (255, 255, 0), 2)
            cv2.rectangle(fr, (x, y), (x + w, y + h), (255, 0, 0), 2)  # Draw rectangle around detected face

        return fr

    def update_video_feed(self):
        frame = self.get_frame()
        if frame is not None:
            img = Image.fromarray(frame)
            imgtk = ImageTk.PhotoImage(image=img)
            self.canvas.imgtk = imgtk  # Keep a reference to prevent garbage collection
            self.canvas.delete("all")
            self.canvas.create_image(0, 0, anchor=tk.NW, image=imgtk)

        if self.running:
            self.root.after(10, self.update_video_feed)  # Update every 10 milliseconds
        else:
            self.video.release()

print("check2")
# Load Haarcascade XML file
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

print("check3")
# Start Tkinter main event loop
root = tk.Tk()
root.title("Facial Expression Recognition")

camera = VideoCamera(root)
root.mainloop()

This is the GUI on its own, which works:

Code: Select all

import tensorflow as tf
import tkinter as tk
from tkinter import filedialog
from tkinter import font
from PIL import Image, ImageTk
import cv2
import numpy as np
import urllib.request

print("OpenCV version:", cv2.__version__)
print("NumPy version:", np.__version__)

face_cascade_url = "https://raw.githubusercontent.com/opencv/opencv/master/data/haarcascades/haarcascade_frontalface_default.xml"
face_cascade_name = "haarcascade_frontalface_default.xml"
urllib.request.urlretrieve(face_cascade_url, face_cascade_name)
facec = cv2.CascadeClassifier(face_cascade_name)

print("check1")

class VideoCamera:
    def __init__(self, root):
        self.video = cv2.VideoCapture(0)
        self.root = root
        self.canvas = tk.Canvas(self.root, width=377, height=377)
        self.canvas.place(x=10, y=10)
        self.running = True
        self.canvas.pack()
        self.uploaded_image = None
        self.uploaded_video = None
        #self.label = label

        self.create_widgets()

        # Start updating the video feed
        self.update_video_feed()

    def create_widgets(self):
        self.instructions_button = tk.Button(self.root, text="Instructions", command=self.showInstructions)
        self.instructions_button.place(x=400, y=10, width=191, height=61)

        self.start_stop_button = tk.Button(self.root, text="Start/Stop Webcam", command=self.startStopWebcam)
        self.start_stop_button.place(x=400, y=260, width=200, height=50)

        self.upload_video_button = tk.Button(self.root, text="Upload Video", command=self.uploadVideo)
        self.upload_video_button.place(x=400, y=80, width=88, height=70)

        self.upload_image_button = tk.Button(self.root, text="Upload Image", command=self.uploadImage)
        self.upload_image_button.place(x=500, y=80, width=90, height=70)

        self.message_label = tk.Label(self.root, text="If program is to work, please turn on\n webcam feed or upload a photo/video.")
        self.message_label.place(x=390, y=160)

    def showInstructions(self):
        # Create a new Toplevel window
        instructions_window = tk.Toplevel(self.root)
        instructions_window.title("Instructions")

        # Define instructions text
        instructions_text = "How To Use This Program:\n1. This is a deep learning facial expression recogition system that detects on of 7 emotions: Anger, Disgust, Fear, Happiness, Neutral, Sadness and Surprise. \n2. Either turn on the webcam feed or upload a photo/video.\n3. Press 'Start/Stop Webcam' to start or stop the webcam feed.\n4. Use 'Upload Video' button to upload a video file.\n5. Use 'Upload Image' button to upload an image file.\n5.  Follow the facial expression recognition in the main window and have fun!"

        # Add Label widget to display instructions text
        instructions_label = tk.Label(instructions_window, text=instructions_text)
        instructions_label.pack(padx=10, pady=10, anchor="center")

    def startStopWebcam(self):
        if self.running:
            self.video.release()
            self.running = False
        else:
            self.video = cv2.VideoCapture(0)
            self.running = True

    def uploadVideo(self):
        video_path = filedialog.askopenfilename(filetypes=[("Video files", "*.mp4;*.avi;*.mkv")])
        if video_path:
            self.uploaded_video = cv2.VideoCapture(video_path)
            self.video = None
            self.uploaded_image = None

    def uploadImage(self):
        image_path = filedialog.askopenfilename(filetypes=[("Image files", "*.jpg;*.png;*.gif;*.jfif")])
        if image_path:
            self.uploaded_image = Image.open(image_path)
            self.video = None
            self.uploaded_video = None

    def get_frame(self):
        if self.uploaded_image is not None:
            fr = np.array(self.uploaded_image)
            fr = cv2.resize(fr, (377, 377))
            gray_fr = cv2.cvtColor(fr, cv2.COLOR_RGB2GRAY)
            faces = facec.detectMultiScale(gray_fr, 1.3, 5)

            for (x, y, w, h) in faces:
                cv2.rectangle(fr, (x, y), (x + w, y + h), (255, 0, 0), 2)  # Draw rectangle around detected face
            return fr if fr is not None else np.zeros((377, 377, 3), dtype=np.uint8)

        elif self.uploaded_video is not None:
            _, fr = self.uploaded_video.read()
            if fr is None:
                return np.zeros((377, 377, 3), dtype=np.uint8)
            fr = cv2.resize(fr, (377, 377))
            gray_fr = cv2.cvtColor(fr, cv2.COLOR_BGR2GRAY)
            faces = facec.detectMultiScale(gray_fr, 1.3, 5)
            for (x, y, w, h) in faces:
                cv2.rectangle(fr, (x, y), (x + w, y + h), (255, 0, 0), 2)  # Draw rectangle around detected face
            return fr

        elif self.video is not None:
            _, fr = self.video.read()
            if fr is None:
                return np.zeros((480, 640, 3), dtype=np.uint8)
            gray_fr = cv2.cvtColor(fr, cv2.COLOR_BGR2GRAY)
            faces = facec.detectMultiScale(gray_fr, 1.3, 5)
            for (x, y, w, h) in faces:
                cv2.rectangle(fr, (x, y), (x + w, y + h), (255, 0, 0), 2)  # Draw rectangle around detected face
            return fr

        else:
            return np.zeros((480, 640, 3), dtype=np.uint8)

    def update_video_feed(self):
        frame = self.get_frame()
        if frame is not None:
            img = Image.fromarray(frame)
            imgtk = ImageTk.PhotoImage(image=img)
            self.canvas.imgtk = imgtk  # Keep a reference to prevent garbage collection
            self.canvas.delete("all")
            self.canvas.create_image(0, 0, anchor=tk.NW, image=imgtk)

        if self.running:
            self.root.after(10, self.update_video_feed)  # Update every 10 milliseconds
        else:
            self.video.release()

print("check2")
# Load Haarcascade XML file
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

print("check3")
# Start Tkinter main event loop
root = tk.Tk()
root.title("Facial Expression Recognition")

camera = VideoCamera(root)
root.mainloop()

I also get this error message when the camera freezes:

Code: Select all

gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.error: OpenCV(4.9.0) d:\a\opencv-python\opencv-python\opencv\modules\imgproc\src\color.simd_helpers.hpp:92: error: (-2:Unspecified error) in function '__cdecl cv::impl::`anonymous-namespace'::CvtHelper Invalid number of channels in input image:
>     'VScn::contains(scn)'
> where
>     'scn' is 1

Also, the detection rectangles around the faces never get drawn.
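
As for the freeze itself: update_video_feed() reschedules itself every 10 ms on the Tk main thread, and in the combined script get_frame() also runs Haar detection plus a model prediction for every detected face inside that same callback, so while a prediction is running the event loop cannot service the upload buttons. Below is a minimal sketch of the usual workaround: do the slow work in a background thread and only draw on the main thread. slow_predict, worker and poll are placeholder names, and slow_predict just sleeps to stand in for the real Keras call, so treat it as an illustration of the pattern rather than a drop-in fix.

Code: Select all

import queue
import threading
import time

import cv2
import tkinter as tk
from PIL import Image, ImageTk


def slow_predict(gray_roi):
    """Stand-in for model.predict(): sleeps to imitate a slow Keras call."""
    time.sleep(0.2)
    return "Neutral"


def worker(frame_queue, stop_event):
    """Grab frames and run the heavy prediction OFF the Tk main thread."""
    cap = cv2.VideoCapture(0)
    while not stop_event.is_set():
        ret, frame = cap.read()
        if not ret:
            time.sleep(0.05)
            continue
        frame = cv2.resize(frame, (377, 377))
        label = slow_predict(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY))
        cv2.putText(frame, label, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 2)
        try:
            frame_queue.put_nowait(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        except queue.Full:
            pass  # the GUI has not drawn the previous frame yet; drop this one
    cap.release()


def poll(root, canvas, frame_queue):
    """Runs on the Tk main thread: only draws whatever the worker produced."""
    try:
        frame = frame_queue.get_nowait()
        imgtk = ImageTk.PhotoImage(Image.fromarray(frame))
        canvas.imgtk = imgtk  # keep a reference so it is not garbage collected
        canvas.delete("all")
        canvas.create_image(0, 0, anchor=tk.NW, image=imgtk)
    except queue.Empty:
        pass
    root.after(30, poll, root, canvas, frame_queue)  # buttons stay responsive


if __name__ == "__main__":
    root = tk.Tk()
    root.title("Threaded webcam sketch")
    canvas = tk.Canvas(root, width=377, height=377)
    canvas.pack()
    frame_queue = queue.Queue(maxsize=1)
    stop_event = threading.Event()
    threading.Thread(target=worker, args=(frame_queue, stop_event), daemon=True).start()
    poll(root, canvas, frame_queue)
    root.mainloop()
    stop_event.set()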

More details here: https://stackoverflow.com/questions/781 ... kinter-gui