Large latency when capturing an RTSP stream from an IP camera with OpenCV

l0oc07j2 · asked 2023-02-09 · in: Other

I am trying to do some processing on an IP camera feed. The processing itself works fine, but there is a lag of roughly 7~10 seconds between the real world and the captured video.
I am using rtsp://@ip:port/live ext
The camera has a web interface (IE/ActiveX) that displays the image with very low lag (about 200~300 ms).
I tested this code: when I feed it a video file it runs fine with no delay, but when I use my IP camera (or a drone camera) over RTSP, it runs with a 7~10 s delay.
Note: the resolution is set to (1080, 720) and I am using an NVIDIA Quadro 1000 GPU, which handles the load fine, so I don't think the problem is the processing or the hardware; it is the code.
Edit: this is probably related to the video capture buffer. Is there a way to make it always use the latest frame?
Edit 2: with VLC I get good latency, only about 300 ms of delay.
Thanks everyone!
The code I am using is below:

import cv2
import time

import argparse
import numpy as np
from PIL import Image
from utils.anchor_generator import generate_anchors
from utils.anchor_decode import decode_bbox
from utils.nms import single_class_non_max_suppression
from load_model.pytorch_loader import load_pytorch_model, pytorch_inference

# model = load_pytorch_model('models/face_mask_detection.pth')
model = load_pytorch_model('models/model360.pth')
# anchor configuration
#feature_map_sizes = [[33, 33], [17, 17], [9, 9], [5, 5], [3, 3]]
feature_map_sizes = [[45, 45], [23, 23], [12, 12], [6, 6], [4, 4]]
anchor_sizes = [[0.04, 0.056], [0.08, 0.11], [0.16, 0.22], [0.32, 0.45], [0.64, 0.72]]
anchor_ratios = [[1, 0.62, 0.42]] * 5

# generate anchors
anchors = generate_anchors(feature_map_sizes, anchor_sizes, anchor_ratios)

# for inference , the batch size is 1, the model output shape is [1, N, 4],
# so we expand dim for anchors to [1, anchor_num, 4]
anchors_exp = np.expand_dims(anchors, axis=0)

id2class = {0: 'Mask', 1: 'NoMask'}

def inference(image,
              conf_thresh=0.5,
              iou_thresh=0.4,
              target_shape=(160, 160),
              draw_result=True,
              show_result=True
              ):
    '''
    Main function of detection inference
    :param image: 3D numpy array of image
    :param conf_thresh: the min threshold of classification probability.
    :param iou_thresh: the IOU threshold of NMS
    :param target_shape: the model input size.
    :param draw_result: whether to draw bounding boxes on the image.
    :param show_result: whether to display the image.
    :return:
    '''
    # image = np.copy(image)
    output_info = []
    height, width, _ = image.shape
    image_resized = cv2.resize(image, target_shape)
    image_np = image_resized / 255.0  # normalize to [0, 1]
    image_exp = np.expand_dims(image_np, axis=0)

    image_transposed = image_exp.transpose((0, 3, 1, 2))

    y_bboxes_output, y_cls_output = pytorch_inference(model, image_transposed)
    # remove the batch dimension, for batch is always 1 for inference.
    y_bboxes = decode_bbox(anchors_exp, y_bboxes_output)[0]
    y_cls = y_cls_output[0]
    # To speed up, do single class NMS, not multiple classes NMS.
    bbox_max_scores = np.max(y_cls, axis=1)
    bbox_max_score_classes = np.argmax(y_cls, axis=1)

    # keep_idx is the alive bounding box after nms.
    keep_idxs = single_class_non_max_suppression(y_bboxes,
                                                 bbox_max_scores,
                                                 conf_thresh=conf_thresh,
                                                 iou_thresh=iou_thresh,
                                                 )

    for idx in keep_idxs:
        conf = float(bbox_max_scores[idx])
        class_id = bbox_max_score_classes[idx]
        bbox = y_bboxes[idx]
        # clip the coordinate, avoid the value exceed the image boundary.
        xmin = max(0, int(bbox[0] * width))
        ymin = max(0, int(bbox[1] * height))
        xmax = min(int(bbox[2] * width), width)
        ymax = min(int(bbox[3] * height), height)

        if draw_result:
            if class_id == 0:
                color = (0, 255, 0)
            else:
                color = (255, 0, 0)
            cv2.rectangle(image, (xmin, ymin), (xmax, ymax), color, 2)
            cv2.putText(image, "%s: %.2f" % (id2class[class_id], conf), (xmin + 2, ymin - 2),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.8, color)
        output_info.append([class_id, conf, xmin, ymin, xmax, ymax])

    if show_result:
        Image.fromarray(image).show()
    return output_info

def run_on_video(video_path, output_video_name, conf_thresh):
    cap = cv2.VideoCapture(video_path)
    height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    fps = cap.get(cv2.CAP_PROP_FPS)
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    # writer = cv2.VideoWriter(output_video_name, fourcc, int(fps), (int(width), int(height)))
    total_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
    if not cap.isOpened():
        raise ValueError("Video open failed.")
    status = True
    idx = 0
    while status:
        start_stamp = time.time()
        status, img_raw = cap.read()
        read_frame_stamp = time.time()
        if status:
            # convert BGR -> RGB only after a successful read; cvtColor on None would raise
            img_raw = cv2.cvtColor(img_raw, cv2.COLOR_BGR2RGB)
            inference(img_raw,
                      conf_thresh,
                      iou_thresh=0.5,
                      target_shape=(360, 360),
                      draw_result=True,
                      show_result=False)
            cv2.imshow('image', img_raw[:, :, ::-1])
            cv2.waitKey(1)
            inference_stamp = time.time()
            # writer.write(img_raw)
            write_frame_stamp = time.time()
            idx += 1
            print("%d of %d" % (idx, total_frames))
            print("read_frame:%f, infer time:%f, write time:%f" % (read_frame_stamp - start_stamp,
                                                                   inference_stamp - read_frame_stamp,
                                                                   write_frame_stamp - inference_stamp))
    # writer.release()

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Face Mask Detection")
    parser.add_argument('--img-mode', type=int, default=1, help='set 1 to run on image, 0 to run on video.')
    parser.add_argument('--img-path', type=str, help='path to your image.')
    parser.add_argument('--video-path', type=str, default='0', help='path to your video, `0` means to use camera.')
    # parser.add_argument('--hdf5', type=str, help='keras hdf5 file')
    args = parser.parse_args()
    if args.img_mode:
        imgPath = args.img_path
        img = cv2.imread(imgPath)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        inference(img, show_result=True, target_shape=(360, 360))
    else:
        video_path = args.video_path
        if args.video_path == '0':
            video_path = 0
        run_on_video(video_path, '', conf_thresh=0.5)

I don't know why it is this slow with OpenCV. I would appreciate any tips for making the capture faster.

koaltpgm 1#

The problem is in OpenCV's RTSP stream implementation.
To get a Mat out of the stream, you have to initialize the codec and feed it several compressed-frame packets. The codec keeps an internal frame buffer that works as a FIFO (first in, first out): you call avcodec_send_packet(), then avcodec_receive_frame(), and the returned frame is wrapped into a Mat object and handed back to you. The first few packets only initialize the buffer and do not produce any picture.
(More info here: https://ffmpeg.org/doxygen/3.3/group__lavc__encdec.html)
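If you want to watch that packet-in / frame-out behaviour from Python rather than C, roughly the same loop can be sketched with the PyAV bindings (assuming PyAV is available; the URL and the rtsp_transport option below are placeholders):

import av

# open the stream; rtsp_transport=tcp is a standard FFmpeg RTSP option
container = av.open("rtsp://<ip>:<port>/<path>", options={"rtsp_transport": "tcp"})
video = container.streams.video[0]

for packet in container.demux(video):
    # decode() pushes the packet into the codec and returns whatever frames the
    # internal FIFO releases -- the first packets typically return an empty list
    for frame in packet.decode():
        img = frame.to_ndarray(format="bgr24")  # same BGR layout OpenCV uses
        # ... run inference on img here ...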
Don't expect low latency from RTSP with OpenCV in Python. The only way I found to reduce the delay was to start from the FFmpeg examples and rewrite the capture in C++.
Increasing the number of I-frames may help (spoiler: not much). P.S. Some examples of my work with RTSP streams: https://www.youtube.com/channel/UCOK7D73tj7Dl4ZyXE-J0UNA
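If you have to stay with OpenCV in Python anyway, one common mitigation for the FIFO buffering described above is to read frames in a background thread and keep only the newest one, so the processing loop never works on stale frames. A minimal sketch (the class name and URL are illustrative, and CAP_PROP_BUFFERSIZE is only honoured by some backends):

import threading
import cv2

class LatestFrameReader:
    """Keeps reading from the capture and stores only the most recent frame."""

    def __init__(self, src):
        self.cap = cv2.VideoCapture(src, cv2.CAP_FFMPEG)
        self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)  # best effort, backend dependent
        self.lock = threading.Lock()
        self.frame = None
        self.running = True
        threading.Thread(target=self._reader, daemon=True).start()

    def _reader(self):
        while self.running:
            ok, frame = self.cap.read()
            if not ok:
                continue
            with self.lock:
                self.frame = frame  # older frames are simply discarded

    def read(self):
        with self.lock:
            return None if self.frame is None else self.frame.copy()

    def release(self):
        self.running = False
        self.cap.release()

# usage: create LatestFrameReader("rtsp://...") instead of cv2.VideoCapture(...)
# and call .read() in the loop; it returns the newest decoded frame or None.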

wlsrxk51 2#

OpenCV has a bug in its decoder implementation. You need to edit opencv/modules/videoio/src/cap_ffmpeg_impl.hpp.
Right after the line enc->thread_count = get_number_of_cpus();, add the following:

AVDictionaryEntry* threads_entry = av_dict_get(dict, "threads", NULL, 0);
if (threads_entry)
{
    int i = 1;
    if (sscanf(threads_entry->value, "%d", &i) == 1)
        enc->thread_count = i;
}

Then build opencv_videoio_ffmpeg64.dll from this git repository: https://github.com/opencv/opencv_3rdparty/tree/ffmpeg/4.x
Replace the old .dll, then set OPENCV_FFMPEG_CAPTURE_OPTIONS to "threads;1|protocol_whitelist;file,rtp,udp,tcp".
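If you open the capture from Python, the environment variable has to be set before the VideoCapture object is created for it to take effect; a small sketch of what that might look like (the URL is a placeholder):

import os
import cv2

# must be set before the VideoCapture is constructed, otherwise it is ignored
os.environ["OPENCV_FFMPEG_CAPTURE_OPTIONS"] = "threads;1|protocol_whitelist;file,rtp,udp,tcp"

# select the FFmpeg backend explicitly so the options above are applied
cap = cv2.VideoCapture("rtsp://<ip>:<port>/<path>", cv2.CAP_FFMPEG)

ok, frame = cap.read()
print("opened:", cap.isOpened(), "first read ok:", ok)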
