I am trying to get the angles from three points in MediaPipe (11, 13, 15 // 12, 14, 16) to check whether a person's form is correct or not. But when I fed a video to the model, I ran into this error:
INFO: Created TensorFlow Lite XNNPACK delegate for CPU.
WARNING: All log messages before absl::InitializeLog() is called are written to STDERR
W0000 00:00:1739209248.264311 98432 inference_feedback_manager.cc:114] Feedback manager requires a model with a single signature inference. Disabling support for feedback tensors.
W0000 00:00:1739209248.331459 133516 inference_feedback_manager.cc:114] Feedback manager requires a model with a single signature inference. Disabling support for feedback tensors.
Here is my code:
import cv2
import numpy as np
import time
import PoseModule as pm
from flask import Flask  # Flask is already installed; flask-login, flask-sqlalchemy and other modules still need to be installed

cap = cv2.VideoCapture(r"C:\Users\svsiv\OneDrive\Desktop\Nexathon - Working Progress\ML model\PoseVideos\curls.mp4")  # the r prefix treats the path as a raw string; without it I was getting "SyntaxError: unicode error"
detector = pm.poseDetector()

while True:
    # success, img = cap.read()
    # img = cv2.resize(img, (1280, 720))
    img = cv2.imread(r"\ML model\PoseVideos\curls.mp4")
    img = detector.findPose(img, False)
    lmList = detector.findPosition(img, False)
    print(lmList)
    if len(lmList) != 0:
        # Right arm
        detector.findAngle(img, 12, 14, 16)
        # Left arm
        detector.findAngle(img, 11, 13, 15)
    cv2.imshow("Image", img)
    cv2.waitKey(1)
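For context, the messages in the log above are warnings from MediaPipe's TensorFlow Lite backend rather than a Python exception. The more likely problem in the loop is that cv2.imread only decodes still images, so calling it on curls.mp4 returns None and findPose then has nothing to work with. Below is a minimal sketch of the frame-by-frame reading loop that the commented-out lines already hint at; it reuses the PoseModule and video path from the question, so treat those details as assumptions about your setup.

import cv2
import PoseModule as pm

# Read successive frames with VideoCapture instead of cv2.imread, which
# only handles still images and returns None for an .mp4 file.
cap = cv2.VideoCapture(r"C:\Users\svsiv\OneDrive\Desktop\Nexathon - Working Progress\ML model\PoseVideos\curls.mp4")
detector = pm.poseDetector()

while True:
    success, img = cap.read()
    if not success:  # end of the video, or the path is wrong
        break
    img = cv2.resize(img, (1280, 720))
    img = detector.findPose(img, False)
    lmList = detector.findPosition(img, False)
    if len(lmList) != 0:
        detector.findAngle(img, 12, 14, 16)  # right arm: shoulder-elbow-wrist
        detector.findAngle(img, 11, 13, 15)  # left arm: shoulder-elbow-wrist
    cv2.imshow("Image", img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()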
PoseModule.py:
import cv2
import mediapipe as mp
import time
import math
import numpy as np


class poseDetector():
    def __init__(self, mode=False, upBody=False, smooth=True, detectionCon=0.5, trackCon=0.5):
        self.mode = mode
        self.upBody = upBody
        self.smooth = smooth
        self.detectionCon = detectionCon
        self.trackCon = trackCon
        self.mpDraw = mp.solutions.drawing_utils
        self.mpPose = mp.solutions.pose
        self.pose = self.mpPose.Pose(
            self.mode,
            self.upBody,
            self.smooth,
            False,  # enable_segmentation = False (default)
            True,   # smooth_segmentation = True (default)
            self.detectionCon,
            self.trackCon
        )

    def findPose(self, img, draw=True):
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        self.results = self.pose.process(imgRGB)
        if self.results.pose_landmarks:
            if draw:
                self.mpDraw.draw_landmarks(img, self.results.pose_landmarks, self.mpPose.POSE_CONNECTIONS)
        return img

    def findPosition(self, img, draw=True):
        self.lmList = []
        if self.results.pose_landmarks:
            for id, lm in enumerate(self.results.pose_landmarks.landmark):
                h, w, c = img.shape
                # print(id, lm)
                cx, cy = int(lm.x * w), int(lm.y * h)
                self.lmList.append([id, cx, cy])
                if draw:
                    cv2.circle(img, (cx, cy), 5, (255, 0, 0), cv2.FILLED)
        return self.lmList

    def findAngle(self, img, p1, p2, p3, draw=True):
        x1, y1 = self.lmList[p1][1:]
        x2, y2 = self.lmList[p2][1:]
        x3, y3 = self.lmList[p3][1:]
        # Calculating the angles:
        # angle = math.degrees(math.atan2(y3 - y2, x3 - x2) - math.atan2(y1 - y2, x1 - x2))
        # print("angle = ", angle)
        v1 = np.array([x1 - x2, y1 - y2])  # vector from elbow to shoulder
        v2 = np.array([x3 - x2, y3 - y2])  # vector from elbow to wrist
        angle_rad = np.arccos(np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2)))
        angle_deg = np.degrees(angle_rad)
        # Get the *flexion* angle (the smaller angle)
        flexion_angle = min(angle_deg, 360 - angle_deg)  # this is the fix
        print(flexion_angle)
        # Draw
        if draw:
            cv2.line(img, (x1, y1), (x2, y2), (255, 255, 255), 3)
            cv2.line(img, (x3, y3), (x2, y2), (255, 255, 255), 3)
            cv2.circle(img, (x1, y1), 10, (0, 0, 255), cv2.FILLED)
            cv2.circle(img, (x1, y1), 15, (0, 0, 255), 2)
            cv2.circle(img, (x2, y2), 10, (0, 0, 255), cv2.FILLED)
            cv2.circle(img, (x2, y2), 15, (0, 0, 255), 2)
            cv2.circle(img, (x3, y3), 10, (0, 0, 255), cv2.FILLED)
            cv2.circle(img, (x3, y3), 15, (0, 0, 255), 2)
            cv2.putText(img, str(int(flexion_angle)), (x2 - 50, y2 + 50), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 0), 2)


def main():
    image_path = r'\ML model\PoseVideos\curls.mp4'
    cap = cv2.VideoCapture(image_path)
    pTime = 0
    detector = poseDetector()
    while True:
        success, img = cap.read()
        img = cv2.resize(img, (1280, 720))
        img = detector.findPose(img)
        lmList = detector.findPosition(img, draw=False)
        cTime = time.time()
        fps = 1 / (cTime - pTime)
        pTime = cTime
        cv2.putText(img, str(int(fps)), (70, 50), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 0), 3)
        cv2.imshow("Image", img)
        cv2.waitKey(1)


if __name__ == "__main__":
    main()
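One more thing worth double-checking, offered as an assumption about your MediaPipe version: recent releases of mp.solutions.pose.Pose no longer have an upper_body_only parameter, so when the constructor above passes its arguments positionally, self.upBody lands in the model_complexity slot. Passing keyword arguments makes the mapping explicit; here is a sketch using the same default values as the question.

import mediapipe as mp

# Construct the Pose object with keyword arguments so each value maps to the
# intended parameter of the current mp.solutions.pose.Pose signature.
pose = mp.solutions.pose.Pose(
    static_image_mode=False,
    model_complexity=1,              # 0, 1 or 2; there is no "upper body only" flag any more
    smooth_landmarks=True,
    enable_segmentation=False,
    smooth_segmentation=True,
    min_detection_confidence=0.5,
    min_tracking_confidence=0.5,
)

Keyword arguments also keep the call valid if a future MediaPipe release inserts or reorders parameters.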
More details here: https://stackoverflow.com/questions/794 ... -mediapipe