C++: How to create an alpha video from PNGs with FFmpeg in C code?


I am trying to encode a video (MOV) from PNG images with an alpha channel using FFmpeg. Using the command

ffmpeg -i %d.png -r 25 -vcodec png -b:v 2500K out.mov -y

works fine.
Now I want to do the same from C++ code, but I get this error:

[mov @ 0x29bf0c0] muxer does not support non seekable output

In my code I set it up like this:

oformat = av_guess_format("mov", nullptr, nullptr);
oformat->video_codec = AV_CODEC_ID_PNG;
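From what I can tell, the error appears because the mov muxer wants to seek back and finalize the moov atom, while my custom AVIOContext (see write_packet in the code below) has no seek callback. My Remux() function already passes movflags for a fragmented MOV; I assume the same option at header time would avoid the seek, roughly like this (untested):

// Untested assumption: request a fragmented MOV so the muxer never needs to seek.
// &opts would replace the nullptr currently passed to avformat_write_header() in Init().
AVDictionary *opts = nullptr;
av_dict_set(&opts, "movflags", "frag_keyframe+empty_moov", 0);
err = avformat_write_header(ofctx, &opts);   // instead of avformat_write_header(ofctx, nullptr)
av_dict_free(&opts);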

ffmpeg shows:

ffmpeg -h muxer=mov
Muxer mov [QuickTime / MOV]:
Common extensions: mov.
Default video codec: h264.
Default audio codec: aac.

The default video codec is h264, but the ffmpeg command line above can still create a MOV using the png encoder:

ffmpeg -i out.mov

Input #0, mov,mp4,m4a,3gp,3g2,mj2, from 'resa.mov':
  Metadata:
    major_brand     : qt
    minor_version   : 512
    compatible_brands: qt
    encoder         : Lavf59.24.100
  Duration: 00:00:03.00, start: 0.000000, bitrate: 13430 kb/s
  Stream #0:0: Video: png (png  / 0x20676E70), rgba(pc), 1280x720, 13427 kb/s, 25 fps, 25 tbr, 12800 tbn, 12800 tbc (default)
    Metadata:
      handler_name    : VideoHandler
      vendor_id       : FFMP

How can I convert this command into C code?

ffmpeg -i %d.png -r 25 -vcodec png -b:v 2500K out.mov -y
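My rough reading of those flags in terms of the API would be something like this (an untested sketch, using the same FFmpeg calls as in my code below; the PNG encoder is picked directly instead of overwriting oformat->video_codec):

// Rough equivalent of "-vcodec png -b:v 2500K -r 25" (my assumption, untested):
const AVCodec *codec = avcodec_find_encoder(AV_CODEC_ID_PNG);   // -vcodec png
AVCodecContext *cctx = avcodec_alloc_context3(codec);
cctx->pix_fmt   = AV_PIX_FMT_RGBA;   // PNG keeps the alpha channel
cctx->time_base = { 1, 25 };         // -r 25
cctx->bit_rate  = 2500 * 1000;       // -b:v 2500K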

The code I am using. Header file:

#ifndef _RENDER_H_
#define _RENDER_H_
#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <fstream>
#include <cstring>
#include <math.h>
#include <string.h>
#include <algorithm>
#include <string> 

extern "C"
{
#include <libavcodec/avcodec.h>
#include <libavcodec/avfft.h>

#include <libavdevice/avdevice.h>

#include <libavfilter/avfilter.h>
//#include <libavfilter/avfiltergraph.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>

#include <libavformat/avformat.h>
#include <libavformat/avio.h>

    

#include <libavutil/opt.h>
#include <libavutil/common.h>
#include <libavutil/channel_layout.h>
#include <libavutil/imgutils.h>
#include <libavutil/mathematics.h>
#include <libavutil/samplefmt.h>
#include <libavutil/time.h>
#include <libavutil/opt.h>
#include <libavutil/pixdesc.h>
#include <libavutil/file.h>

    
//#include "libavcodec/vdpau.h"
#include "libavutil/hwcontext.h"
//#include "libavutil/hwcontext_vdpau.h"

    

#include <libswscale/swscale.h>

    class VideoCapture {
    public:

        VideoCapture() {
            oformat = nullptr;
            ofctx = nullptr;
            videoStream = nullptr;
            videoFrame = nullptr;
            swsCtx = nullptr;
            frameCounter = 0;

            
            av_register_all();
            //av_log_set_callback(avlog_cb);
        }

        ~VideoCapture() {
            //Free();
        }

        void Init(std::string name, int width, int height, int fpsrate, int bitrate);

        void AddFrame(uint8_t *data);

        void Finish();

    private:

        AVOutputFormat *oformat;
        AVFormatContext *ofctx;
        AVIOContext *avio_out;
        AVStream *videoStream;
        AVFrame *videoFrame;

        AVCodec *codec;
        AVCodecContext *cctx;
        struct buffer_data *bd;
        struct buffer_data* res_video;
        SwsContext *swsCtx;
        //FILE *fp_write;
        char* filename;
        //int buf_len;
        int frameCounter;

        int fps;

        void Free();

        void Remux();
    };

    struct buffer_data {
        uint8_t *ptr;
        size_t size; ///< size left in the buffer
    };

}
#endif

And the source file:

#include "VideoCapture.h"
#define FINAL_FILE_NAME "record.mov"
#define VIDEO_TMP_FILE "tmp.avi"

using namespace std;

FILE *fp_write;
static int write_packet(void *opaque, uint8_t *buf, int buf_size)
{
    struct buffer_data *bd = (struct buffer_data *)opaque;
    printf("ptr  :%p size:%zu\n", bd->ptr, bd->size);
    memcpy(bd->ptr + bd->size, buf, buf_size);
    bd->size = buf_size + bd->size;
    return buf_size;
}

void VideoCapture::Init(string filename, int width, int height, int fpsrate, int bitrate) {

    fps = fpsrate;

    int err;
    uint8_t *outbuffer=nullptr;
    outbuffer=(uint8_t*)av_malloc(32768);
    bd = (struct buffer_data*)malloc(sizeof(struct buffer_data));
    bd->ptr = (uint8_t*)av_malloc(1000000000);
    bd->size = 0;
    avio_out =avio_alloc_context(outbuffer, 32768,1,bd,nullptr,write_packet,nullptr);
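    // Note: the last argument above (the seek callback) is nullptr, so this
    // AVIOContext is write-only and non-seekable, which is what the mov muxer
    // complains about in avformat_write_header().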
    if (!(oformat = av_guess_format("mov", nullptr, nullptr))) {
        cout << "Failed to define output format"<< endl;
        return;
    }
    oformat->video_codec = AV_CODEC_ID_PNG;
    cout << "oformat->video_codec " << oformat->video_codec << endl;
    if ((err = avformat_alloc_output_context2(&ofctx, oformat, nullptr, nullptr)) < 0) {
        cout  <<"Failed to allocate output context"<< endl;
        //Free();
        return;
    }
    cout << "oformat->video_codec " << oformat->video_codec << endl;
    if (!(codec = avcodec_find_encoder(oformat->video_codec))) {
        cout <<"Failed to find encoder"<< endl;
        //Free();
        return;
    }

    if (!(videoStream = avformat_new_stream(ofctx, codec))) {
        cout <<"Failed to create new stream"<< endl;
        //Free();
        return;
    }

    if (!(cctx = avcodec_alloc_context3(codec))) {
        cout <<"Failed to allocate codec context"<< endl;
        //Free();
        return;
    }

    videoStream->codecpar->codec_id = oformat->video_codec;
    videoStream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
    videoStream->codecpar->width = width;
    videoStream->codecpar->height = height;
    videoStream->codecpar->format = AV_PIX_FMT_RGBA;
    videoStream->codecpar->bit_rate = bitrate * 1000;
    videoStream->time_base = { 1, fps };

    avcodec_parameters_to_context(cctx, videoStream->codecpar);
    cctx->time_base = { 1, fps };
    cctx->max_b_frames = 2;
    cctx->gop_size = 12;
    if (videoStream->codecpar->codec_id == AV_CODEC_ID_PNG) {
        //av_opt_set(cctx, "preset", "ultrafast", 0);
    }
    if (ofctx->oformat->flags & AVFMT_GLOBALHEADER) {
        cctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    }
    avcodec_parameters_from_context(videoStream->codecpar, cctx);

    if ((err = avcodec_open2(cctx, codec, nullptr)) < 0) {
        cout <<"Failed to open codec"<< endl;
        Free();
        return;
    }
    
    ofctx->pb = avio_out;
    
    ofctx->flags |= AVFMT_FLAG_CUSTOM_IO;
    if ((err = avformat_write_header(ofctx, nullptr)) < 0) {
        cout <<"Failed to write header"<< endl;
        Free();
        return;
    }

    //av_dump_format(ofctx, 0, VIDEO_TMP_FILE, 1);
    cout << "init com" << endl;
}

void VideoCapture::AddFrame(uint8_t *data) {
    int err;
    if (!videoFrame) {

        videoFrame = av_frame_alloc();
        videoFrame->format = AV_PIX_FMT_RGBA;
        videoFrame->width = cctx->width;
        videoFrame->height = cctx->height;

        if ((err = av_frame_get_buffer(videoFrame, 32)) < 0) {
            cout <<"Failed to allocate picture"<< endl;
            return;
        }
    }
    cout << "finish" << endl;
    if (!swsCtx) {
        swsCtx = sws_getContext(cctx->width, cctx->height, AV_PIX_FMT_RGBA, cctx->width, cctx->height, AV_PIX_FMT_RGBA, SWS_BICUBIC, 0, 0, 0);
    }

    int inLinesize[1] = { 4 * cctx->width};

    
    sws_scale(swsCtx, (const uint8_t * const *)&data, inLinesize, 0, cctx->height, videoFrame->data, videoFrame->linesize);

    videoFrame->pts = (1.0 / 30.0) * 90000 * (frameCounter++);
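    // Note: the pts above assumes a 90 kHz clock and 30 fps; it is not derived
    // from cctx->time_base ({1, fps}) set in Init().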

    if ((err = avcodec_send_frame(cctx, videoFrame)) < 0) {
        cout <<"Failed to send frame"<< endl;
        return;
    }

    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = nullptr;
    pkt.size = 0;

    if (avcodec_receive_packet(cctx, &pkt) == 0) {
        pkt.flags |= AV_PKT_FLAG_KEY;
        av_interleaved_write_frame(ofctx, &pkt);
        av_packet_unref(&pkt);
    }
}

void VideoCapture::Finish() {
    
    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = nullptr;
    pkt.size = 0;

    for (;;) {
        avcodec_send_frame(cctx, nullptr);
        if (avcodec_receive_packet(cctx, &pkt) == 0) {
            av_interleaved_write_frame(ofctx, &pkt);
            av_packet_unref(&pkt);
        }
        else {
            break;
        }
    }
    
    
    av_write_trailer(ofctx);
    /*
    if (!(oformat->flags & AVFMT_NOFILE)) {
        int err = avio_close(ofctx->pb);
        if (err < 0) {
            cout <<"Failed to close file"<< endl;
        }
    }
    */
    fp_write = fopen(VIDEO_TMP_FILE, "wb");
    if (fp_write) {
        int true_size = fwrite(bd->ptr, 1, bd->size, fp_write);
        std::cout << true_size << std::endl;
    }
    fcloseall();

    //Remux();
    //Free();
}

void VideoCapture::Free() {
    if (videoFrame) {
        //std::cout << "videoFrame " << std::endl;
        av_frame_free(&videoFrame);
    }
    if (cctx) {
        //std::cout << "cctx" << std::endl;
        avcodec_free_context(&cctx);
    }
    if (ofctx) {
        //std::cout << "ofctx" << ofctx << std::endl;
        avformat_free_context(ofctx);
    }
    if (swsCtx) {
        //std::cout << "swsCtx" << std::endl;
        sws_freeContext(swsCtx);
    }
    /*
    if (bd->ptr != (void*)0)
    {
        free(bd->ptr);
    }
    free(bd);*/
}

static int read_packet(void *opaque, uint8_t *buf, int buf_size)
{
    struct buffer_data *bd = (struct buffer_data *)opaque;
    buf_size = FFMIN(buf_size, bd->size);
    if(buf_size == 0) return -1;
    //printf("read ptr:%p size:%zu\n", bd->ptr, bd->size);
    /* copy internal buffer data to buf */
    memcpy(buf, bd->ptr, buf_size);
    bd->ptr  += buf_size;
    bd->size -= buf_size;
    return buf_size;
}

void VideoCapture::Remux() {
    AVFormatContext *ifmt_ctx = nullptr, *ofmt_ctx = nullptr;
    int err;

    unsigned char* inbuffer=nullptr;
    inbuffer=(unsigned char*)av_malloc(32768);
    ifmt_ctx = avformat_alloc_context();
    AVIOContext *avio_in =avio_alloc_context(inbuffer, 32768 ,0,bd,read_packet,nullptr,nullptr);
    ifmt_ctx->pb=avio_in;
    
    if (!(oformat = av_guess_format(nullptr, nullptr, "h264"))) {
        cout << "Failed to define output format";
        return;
    }

    if ((err = avformat_open_input(&ifmt_ctx, "nullptr", 0, 0)) < 0) {
        cout <<"Failed to open input file for remuxing"<< endl;
    }
    if ((err = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
        cout <<"Failed to retrieve input stream information"<< endl;
    }
    if ((err = avformat_alloc_output_context2(&ofmt_ctx, oformat, nullptr, nullptr))) {
        cout <<"Failed to allocate output context"<< endl;
    }

    AVStream *inVideoStream = ifmt_ctx->streams[0];
    AVStream *outVideoStream = avformat_new_stream(ofmt_ctx, nullptr);
    if (!outVideoStream) {
        cout <<"Failed to allocate output video stream" << endl;
    }
    outVideoStream->time_base = { 1, fps };
    avcodec_parameters_copy(outVideoStream->codecpar, inVideoStream->codecpar);
    outVideoStream->codecpar->codec_tag = 0;
    
    uint8_t *outbuffer=nullptr;
    outbuffer=(uint8_t*)av_malloc(32768);
    res_video = (struct buffer_data*)malloc(sizeof(struct buffer_data));
    res_video->ptr = (uint8_t*)av_malloc(100000000);
    res_video->size = 0;

    if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
        avio_out =avio_alloc_context(outbuffer, 32768,1, res_video, nullptr, write_packet, nullptr);
        ofmt_ctx->pb = avio_out;
    }
    AVDictionary* opts = nullptr;
    av_dict_set(&opts, "movflags", "frag_keyframe+empty_moov", 0);
    if ((err = avformat_write_header(ofmt_ctx, &opts)) < 0) {
        cout <<"Failed to write header to output file"<< endl;
    }

    AVPacket videoPkt;
    int ts = 0;
    while (true) {
        if ((err = av_read_frame(ifmt_ctx, &videoPkt)) < 0) {
            break;
        }
        videoPkt.stream_index = outVideoStream->index;
        videoPkt.pts = ts;
        videoPkt.dts = ts;
        videoPkt.duration = av_rescale_q(videoPkt.duration, inVideoStream->time_base, outVideoStream->time_base);
        ts += videoPkt.duration;
        videoPkt.pos = -1;
        if ((err = av_interleaved_write_frame(ofmt_ctx, &videoPkt)) < 0) {
            cout <<"Failed to mux packet"<< endl;
            av_packet_unref(&videoPkt);
            break;
        }
        av_packet_unref(&videoPkt);
    }

    av_write_trailer(ofmt_ctx);
    cout << "res_video->size " << res_video->size << endl;
    fp_write=fopen(FINAL_FILE_NAME,"wb");
    if (fp_write) {
        int true_size=fwrite(res_video->ptr,1, res_video->size,fp_write);
        std::cout << true_size << std::endl;
    }
    fcloseall();
}

ohfgkhjo1#

I have solved this problem. The help output shows that the mov muxer's video codec is h264, so I use webm instead, which supports VP9, and VP9 supports yuva420p:

Muxer webm [WebM]:
    Common extensions: webm.
    Mime type: video/webm.
    Default video codec: vp9.
    Default audio codec: vorbis.
    Default subtitle codec: webvtt.

Encoder libvpx-vp9 [libvpx VP9]:
    General capabilities: delay threads
    Threading capabilities: other
    Supported pixel formats: yuv420p yuva420p yuv422p yuv440p yuv444p yuv420p10le yuv422p10le yuv440p10le yuv444p10le yuv420p12le yuv422p12le yuv440p12le yuv444p12le gbrp gbrp10le gbrp12le

I changed this line in the code from

oformat = av_guess_format("mov", nullptr, nullptr);

to

oformat = av_guess_format("webm", nullptr, nullptr);

Note: when decoding the webm, the native VP8/VP9 decoder does not decode alpha, but libvpx does, and ffprobe uses the native decoder for a stream when it is available. Currently there is no option to force a specific decoder for a stream, so you have to decode with libvpx-vp9 yourself, like this:

ffmpeg -vcodec libvpx-vp9 -i tmp.webm img_%04d.png

Or in code:

AVCodec* pCodec = avcodec_find_decoder_by_name("libvpx-vp9");
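A slightly fuller sketch of forcing that decoder when opening the file (my assumptions; the function name is made up):

extern "C" {
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
}

// Sketch: open tmp.webm and decode its video stream with libvpx-vp9 instead of
// the native VP9 decoder, so the alpha plane is decoded too.
static AVCodecContext *open_alpha_decoder(AVFormatContext **ifmt, const char *path)
{
    if (avformat_open_input(ifmt, path, nullptr, nullptr) < 0)
        return nullptr;
    if (avformat_find_stream_info(*ifmt, nullptr) < 0)
        return nullptr;

    int vstream = av_find_best_stream(*ifmt, AVMEDIA_TYPE_VIDEO, -1, -1, nullptr, 0);
    if (vstream < 0)
        return nullptr;

    // avcodec_find_decoder(codecpar->codec_id) would pick the native VP9
    // decoder, which drops the alpha channel; pick libvpx-vp9 by name instead.
    const AVCodec *dec = avcodec_find_decoder_by_name("libvpx-vp9");
    if (!dec)
        return nullptr;

    AVCodecContext *dctx = avcodec_alloc_context3(dec);
    avcodec_parameters_to_context(dctx, (*ifmt)->streams[vstream]->codecpar);
    if (avcodec_open2(dctx, dec, nullptr) < 0)
        return nullptr;
    return dctx;   // frames from this context should come out as yuva420p
}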

See these tickets: VP9 will not encode in yuva420p
ffprobe does not recognize yuva420p format (alpha channel) for webm
