Nose detection problem in Python OpenCV face recognition

yvfmudvl, posted on 2023-08-06 in Python

I'm playing around with OpenCV and I'm getting this error: (-215: Assertion failed) !empty() in function 'cv::CascadeClassifier::detectMultiScale'. I don't know why.
Code:

import numpy as np
import cv2

cap = cv2.VideoCapture(0)

#face classifier
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

#eye classifier
eye_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_eye.xml')

#nose classifier
nose_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_mcs_nose.xml')
while True:
    ret, frame = cap.read()

    #detect face
    #convert image to grey
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    #use grey image in face classifier
    faces = face_cascade.detectMultiScale(gray, 1.3, 10)

    #defining faces for the classifier
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 5)

        #find eyes on face
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = frame[y:y+h, x:x+w]

        #detect eyes with eye classifier
        eyes = eye_cascade.detectMultiScale(roi_gray, 1.3, 10)
        for (ex, ey, ew, eh) in eyes:
            cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 5)

        nose = nose_cascade.detectMultiScale(roi_gray, 1.3, 10)
        for (ex, ey, ew, eh) in nose:
            cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 0, 255), 5)

    cv2.imshow('frame', frame)

    if cv2.waitKey(1) == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()

I've checked the code for typos and syntax errors, but it still doesn't work.
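For reference, the (-215: Assertion failed) !empty() error from detectMultiScale means the CascadeClassifier it is called on never loaded its XML file. The pip opencv-python wheels do not ship haarcascade_mcs_nose.xml under cv2.data.haarcascades, so nose_cascade is most likely the empty one. A quick diagnostic sketch, reusing the question's variable names, to confirm which cascade failed to load:

import cv2

# load the three cascades exactly as in the question;
# haarcascade_mcs_nose.xml is not bundled with opencv-python, so this path is expected to fail
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_eye.xml')
nose_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_mcs_nose.xml')

# empty() is True when the XML could not be found or parsed; calling
# detectMultiScale on an empty classifier raises the !empty() assertion
for name, cascade in [('face', face_cascade), ('eye', eye_cascade), ('nose', nose_cascade)]:
    print(name, 'loaded' if not cascade.empty() else 'NOT loaded')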


isr3a4wc (Answer 1)

I got your code working for the eyes and face, but I don't have the nose XML to fix the nose part, so below is everything I was able to fix for you:

import numpy as np
import cv2

cap = cv2.VideoCapture(1)

#  eye classifier
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')

#  face classifier
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

while True:
    ret, frame = cap.read()

    cv2.imwrite("frame.jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), 100])  # jpg 100% quality

    #  detect face
    read_jpg_frame = cv2.imread('frame.jpg', 0)

    #  use grey image in face classifier
    faces = face_cascade.detectMultiScale(read_jpg_frame, scaleFactor=1.1, minNeighbors=4)

    #  defining faces for the classifier
    for (x, y, w, h) in faces:

        #  detect eyes with eye classifier
        eyes = eye_cascade.detectMultiScale(read_jpg_frame[y:y + h, x:x + w], scaleFactor=1.1, minNeighbors=4)
        for (ex, ey, ew, eh) in eyes:
            cv2.rectangle(read_jpg_frame, (x, y), (x + w, y + h), (255, 0, 0), 5)
            cv2.rectangle(read_jpg_frame[y:y+h, x:x+w], (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 5)

        #for (ex, ey, ew, eh) in nose:
            #cv2.rectangle(frame[y:y+h, x:x+w], (ex, ey), (ex + ew, ey + eh), (0, 0, 255), 5)

    cv2.imshow('frame', read_jpg_frame)

    if cv2.waitKey(1) == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()

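On the missing nose XML: haarcascade_mcs_nose.xml is not among the cascades bundled with opencv-python, so it has to be downloaded separately and placed next to the script. A small standalone sketch of the nose pass, reusing the frame.jpg snapshot that this answer already writes (the local file path is an assumption):

import cv2

# assumption: haarcascade_mcs_nose.xml has been downloaded next to the script
nose_cascade = cv2.CascadeClassifier('haarcascade_mcs_nose.xml')
if nose_cascade.empty():
    raise IOError('haarcascade_mcs_nose.xml could not be loaded')

# reuse the grayscale snapshot saved by the answer above
gray = cv2.imread('frame.jpg', 0)

noses = nose_cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=10)
for (nx, ny, nw, nh) in noses:
    cv2.rectangle(gray, (nx, ny), (nx + nw, ny + nh), 255, 5)  # white box on the grayscale image

cv2.imshow('nose test', gray)
cv2.waitKey(0)
cv2.destroyAllWindows()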


ql3eal8s (Answer 2)

This one has sliders, so you can adjust the results:

# for desktop capture, find the rectangle of a specific width and height
#
# running the program pops up a window to watch the video.
# the program video window shows the first monitor,
# but watch the program video window on second extended monitor

import cv2
import numpy as np  # used for finding the color of the rectangle

# start of section for the "to_slide_Color()" function

def redefining_M_N_Value_result_to_float(Value):
    # map trackbar position 0..10 to minNeighbors 1..11; other values pass through unchanged
    if 0 <= Value <= 10:
        Value = Value + 1
    return Value

def redefining_S_F_Value_result_to_float(Value):
    # map trackbar position 0..10 to scaleFactor 1.1..2.1; other values pass through unchanged
    if 0 <= Value <= 10:
        Value = round(1.1 + 0.1 * Value, 1)
    return Value

def fun(x):
    pass

# Window for trackbar
cv2.namedWindow("Detection",cv2.WINDOW_NORMAL)
cv2.resizeWindow('Detection', 500, 500)

def to_slide_Color():
    # Get positions of trackbars
    Face_S_F = cv2.getTrackbarPos("Face_S_F", "Detection")
    Face_M_N = cv2.getTrackbarPos("Face_M_N", "Detection")
    Eye_S_F = cv2.getTrackbarPos("Eye_S_F", "Detection")
    Eye_M_N = cv2.getTrackbarPos("Eye_M_N", "Detection")

    return Face_S_F, Face_M_N, Eye_S_F, Eye_M_N

cv2.createTrackbar("Face_S_F", "Detection", 0, 20, fun)
cv2.createTrackbar("Face_M_N", "Detection", 0, 10, fun)
cv2.createTrackbar("Eye_S_F", "Detection", 0, 20, fun)
cv2.createTrackbar("Eye_M_N", "Detection", 0, 10, fun)

# end of section for the "to_slide_Color()" function

# start of section for opening the video and creating counter variable and two haarcascade variables

# Path to video file
cap = cv2.VideoCapture(
    1,
    apiPreference=cv2.CAP_ANY,
    params=[cv2.CAP_PROP_FRAME_WIDTH, 1280, cv2.CAP_PROP_FRAME_HEIGHT, 720],
)  # capture set to 1280x720 to speed the program up on my computer (RTX 3060, OBS Studio at 60 fps)

# Used as a counter variable; it's not specific to either finding the rectangle shape or the rectangle color
count = 1

#  eye classifier
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')

#  face classifier
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

# end of section for opening the video and creating counter variable and two haarcascade variables

while True:
    (Face_S_F, Face_M_N, Eye_S_F, Eye_M_N) = to_slide_Color()

    Face_S_F = redefining_S_F_Value_result_to_float(Face_S_F)
    Eye_S_F = redefining_S_F_Value_result_to_float(Eye_S_F)
    Face_M_N = redefining_M_N_Value_result_to_float(Face_M_N)
    Eye_M_N = redefining_M_N_Value_result_to_float(Eye_M_N)

    # this is the start of face and eye detector

    ret, frame = cap.read()

    cv2.imwrite("frame.jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), 100])  # jpg 100% quality

    #  detect face
    read_jpg_frame = cv2.imread('frame.jpg', 0)

    #  use grey image in face classifier
    faces = face_cascade.detectMultiScale(read_jpg_frame, scaleFactor=Face_S_F, minNeighbors=Face_M_N)

    #  defining faces for the classifier
    for (x, y, w, h) in faces:
        print(Eye_S_F)
        #  detect eyes with eye classifier
        eyes = eye_cascade.detectMultiScale(read_jpg_frame[y:y + h, x:x + w], scaleFactor=Eye_S_F, minNeighbors=Eye_M_N)
        for (ex, ey, ew, eh) in eyes:
            cv2.rectangle(read_jpg_frame, (x, y), (x + w, y + h), (255, 0, 0), 5)
            cv2.rectangle(read_jpg_frame[y:y + h, x:x + w], (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 5)

    cv2.imshow('frame', read_jpg_frame)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release the video
cap.release()

# Destroy the windows
cv2.destroyAllWindows()

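The slider approach above could also be extended to tune the nose detector once the XML is available. A hedged sketch, with made-up trackbar names Nose_S_F / Nose_M_N, camera index 0, and haarcascade_mcs_nose.xml assumed to sit next to the script:

import cv2

# assumptions: local copy of haarcascade_mcs_nose.xml, camera index 0
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
nose_cascade = cv2.CascadeClassifier('haarcascade_mcs_nose.xml')
if nose_cascade.empty():
    raise IOError('haarcascade_mcs_nose.xml could not be loaded')

cv2.namedWindow('Detection', cv2.WINDOW_NORMAL)
cv2.createTrackbar('Nose_S_F', 'Detection', 0, 10, lambda v: None)
cv2.createTrackbar('Nose_M_N', 'Detection', 0, 10, lambda v: None)

cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    if not ret:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # same 0..10 -> parameter mapping as the answer's helper functions
    nose_sf = 1.1 + 0.1 * cv2.getTrackbarPos('Nose_S_F', 'Detection')
    nose_mn = 1 + cv2.getTrackbarPos('Nose_M_N', 'Detection')

    for (x, y, w, h) in face_cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5):
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = frame[y:y + h, x:x + w]
        for (nx, ny, nw, nh) in nose_cascade.detectMultiScale(roi_gray, scaleFactor=nose_sf, minNeighbors=nose_mn):
            cv2.rectangle(roi_color, (nx, ny), (nx + nw, ny + nh), (0, 0, 255), 5)

    cv2.imshow('Detection', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()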
