我有相同的代码,在 Win11 环境中可以成功推送 RTMP 流,但在 Android 环境中失败并报错。报错的方法是 FFmpeg 6.1 中的 avcodec_send_frame。顺便说一下,Android 上的 FFmpeg 库是我自己编译的,Win11 上用的是官方下载的包。下面分别给出 Android 和 Win11 的代码。
Android 端代码:
/*
 * Send one frame to the encoder (frame == NULL drains/flushes it) and
 * write every packet the encoder produces to the RTMP output context.
 *
 * enc_ctx      - opened encoder context
 * frame        - frame to encode, or NULL to flush
 * pkt          - reusable packet (unreferenced after each write)
 * outFormatCtx - output muxer whose header has already been written
 */
static void encode(AVCodecContext *enc_ctx, AVFrame *frame, AVPacket *pkt,
                   AVFormatContext *outFormatCtx) {
    int ret;

    if (frame)
        /* pts is int64_t: cast for a portable format specifier */
        LOGE2("Send frame %lld\n", (long long) frame->pts);

    ret = avcodec_send_frame(enc_ctx, frame);
    if (ret < 0) {
        char errbuf[AV_ERROR_MAX_STRING_SIZE];
        av_strerror(ret, errbuf, sizeof(errbuf));
        LOGE2("Error sending a frame for encoding ,%s\n", errbuf);
        /* never exit() inside an Android process - just report and bail */
        return;
    }

    while (ret >= 0) {
        ret = avcodec_receive_packet(enc_ctx, pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return; /* needs more input, or fully drained */
        if (ret < 0) {
            LOGE2("Error during encoding\n");
            return;
        }
        /* BUG FIX: original printed pkt->pts through %d and labelled it
         * "size"; print both fields with matching specifiers instead. */
        LOGE2("Write packet pts=%lld (size=%d)\n",
              (long long) pkt->pts, pkt->size);
        ret = av_write_frame(outFormatCtx, pkt); /* push to RTMP stream */
        if (ret < 0)
            LOGE2("write frame err=%s", av_err2str(ret));
        av_packet_unref(pkt);
    }
}
PUSHER_FUNC(int, testPush, jstring yuvPath, jstring outputPath) {
const char *yvu_path = env->GetStringUTFChars(yuvPath, JNI_FALSE);
const char *output_path = env->GetStringUTFChars(outputPath, JNI_FALSE);
const char *rtmp_url = output_path;
const AVCodec *codec;
AVCodecContext *codecContext = NULL;
AVFormatContext *outFormatCtx;
int ret = 0;
AVStream *outStream;
AVFrame *frame;
AVPacket *pkt;
int i, x, y;
avformat_network_init();
codec = avcodec_find_encoder(AV_CODEC_ID_H264);
// codec = avcodec_find_encoder(AV_CODEC_ID_MPEG4);
// codec = avcodec_find_encoder(AV_CODEC_ID_H265);
if (!codec) {
LOGE2("JNI Error finding H.264 encoder");
return -1;
}
codecContext = avcodec_alloc_context3(codec);
if (!codecContext) {
fprintf(stderr, "Could not allocate video codec context\n");
return -1;
}
/* Allocate the output context */
outFormatCtx = avformat_alloc_context();
if (!outFormatCtx) {
fprintf(stderr, "Could not allocate output context\n");
return -1;
}
/* Open the RTMP output */
const AVOutputFormat *ofmt = av_guess_format("flv", NULL, NULL);
// const AVOutputFormat *ofmt = av_guess_format("mpegts", NULL, NULL);
// const AVOutputFormat *ofmt = av_guess_format("mp4", NULL, NULL);
if (!ofmt) {
fprintf(stderr, "Could not find output format\n");
return -1;
}
outFormatCtx->oformat = ofmt;
outFormatCtx->url = av_strdup(rtmp_url);
/* Add a video stream */
outStream = avformat_new_stream(outFormatCtx, codec);
if (!outStream) {
fprintf(stderr, "Could not allocate stream\n");
return -1;
}
outStream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
outStream->codecpar->codec_id = codec->id;
outStream->codecpar->width = 352;
outStream->codecpar->height = 288;
/* Set the output URL */
av_dict_set(&outFormatCtx->metadata, "url", rtmp_url, 0);
pkt = av_packet_alloc();
if (!pkt)
return -1;
/* ... (rest of the setup code) ... */
/* put sample parameters */
codecContext->bit_rate = 400000;
/* resolution must be a multiple of two */
codecContext->width = 352;
codecContext->height = 288;
/* frames per second */
codecContext->time_base = (AVRational) {1, 25};
codecContext->framerate = (AVRational) {25, 1};
/* emit one intra frame every ten frames
* check frame pict_type before passing frame
* to encoder, if frame->pict_type is AV_PICTURE_TYPE_I
* then gop_size is ignored and the output of encoder
* will always be I frame irrespective to gop_size
*/
codecContext->gop_size = 10;
codecContext->max_b_frames = 1;
codecContext->pix_fmt = AV_PIX_FMT_YUV420P;
if (codec->id == AV_CODEC_ID_H264)
av_opt_set(codecContext->priv_data, "preset", "slow", 0);
/* open it */
ret = avcodec_open2(codecContext, codec, NULL);
if (ret < 0) {
LOGE2("JNI Error opening codec eer%s", av_err2str(ret));
return ret;
}
avcodec_parameters_to_context(codecContext, outStream->codecpar);
if (avio_open(&outFormatCtx->pb, rtmp_url, AVIO_FLAG_WRITE)) {
fprintf(stderr, "Could not open output\n");
return ret;
}
/* Write the header */
if (avformat_write_header(outFormatCtx, NULL) != 0) {
fprintf(stderr, "Error occurred when opening output\n");
return ret;
}
frame = av_frame_alloc();
if (!frame) {
fprintf(stderr, "Could not allocate video frame\n");
return -1;
}
frame->format = codecContext->pix_fmt;
frame->format = AV_PIX_FMT_YUV420P;
frame->format = 0;
frame->width = codecContext->width;
frame->height = codecContext->height;
ret = av_frame_get_buffer(frame, 0);
if (ret < 0) {
fprintf(stderr, "Could not allocate the video frame data ,%s\n", av_err2str(ret));
return ret;
}
/* FILE *yuv_file = fopen(yvu_path, "rb");
if (yuv_file == NULL) {
LOGE2("cannot open h264 file");
return -1;
}*/
/* encode 1 second of video */
for (i = 0; i < 25000; i++) {
// for (i = 0; i < 25; i++) {
// fflush(stdout);
/* make sure the frame data is writable */
ret = av_frame_make_writable(frame);
if (ret < 0)
exit(1);
/* prepare a dummy image */
/* Y */
for (y = 0; y < codecContext->height; y++) {
for (x = 0; x < codecContext->width; x++) {
frame->data[0][y * frame->linesize[0] + x] = x + y + i * 3;
}
}
/* Cb and Cr */
for (y = 0; y < codecContext->height / 2; y++) {
for (x = 0; x < codecContext->width / 2; x++) {
frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2;
frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5;
}
}
frame->pts = i;
/* encode the image */
encode(codecContext, frame, pkt, outFormatCtx);
}
// fclose(yuv_file);
/* flush the encoder */
encode(codecContext, NULL, pkt, outFormatCtx);
/* Write the trailer */
av_write_trailer(outFormatCtx);
/* Close the output */
avformat_free_context(outFormatCtx);
avcodec_free_context(&codecContext);
av_frame_free(&frame);
av_packet_free(&pkt);
}
Win11 端代码:
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/opt.h>
#include <libavutil/imgutils.h>
#include <libavutil/time.h>
/*
 * Submit one frame to the encoder (NULL flushes it), then drain all
 * packets it can produce, writing each one to the output context.
 */
static void encode(AVCodecContext *enc_ctx, AVFrame *frame, AVPacket *pkt,
                   AVFormatContext *outFormatCtx) {
    if (frame)
        printf("Send frame %3"PRId64"\n", frame->pts);

    int rc = avcodec_send_frame(enc_ctx, frame);
    if (rc < 0) {
        char msg[AV_ERROR_MAX_STRING_SIZE];
        av_strerror(rc, msg, AV_ERROR_MAX_STRING_SIZE);
        fprintf(stderr, "Error sending a frame for encoding ,%s\n", msg);
        exit(1);
    }

    /* Pull packets until the encoder asks for more input or is drained. */
    for (;;) {
        rc = avcodec_receive_packet(enc_ctx, pkt);
        if (rc == AVERROR(EAGAIN) || rc == AVERROR_EOF)
            return;
        if (rc < 0) {
            fprintf(stderr, "Error during encoding\n");
            exit(1);
        }
        printf("Write packet %3"PRId64" (size=%5d)\n", pkt->pts, pkt->size);
        av_write_frame(outFormatCtx, pkt); /* Write the packet to the RTMP stream */
        av_packet_unref(pkt);
    }
}
int main(int argc, char **argv) {
av_log_set_level(AV_LOG_DEBUG);
const char *rtmp_url, *codec_name;
const AVCodec *codec;
AVCodecContext *codecContext = NULL;
int i, ret, x, y;
AVFormatContext *outFormatCtx;
AVStream *st;
AVFrame *frame;
AVPacket *pkt;
uint8_t endcode[] = {0, 0, 1, 0xb7};
if (argc <= 3) {
fprintf(stderr, "Usage: %s <rtmp url> <codec name>\n", argv[0]);
exit(0);
}
rtmp_url = argv[1];
codec_name = argv[2];
avformat_network_init();
/* find the mpeg1video encoder */
// codec = avcodec_find_encoder_by_name(codec_name);
// codec = avcodec_find_encoder(AV_CODEC_ID_MPEG4);
// codec = avcodec_find_encoder(AV_CODEC_ID_VP9);
// codec = avcodec_find_encoder(AV_CODEC_ID_MPEG2VIDEO);
// codec = avcodec_find_encoder(AV_CODEC_ID_H264);
codec = avcodec_find_encoder(AV_CODEC_ID_H264);
// codec = avcodec_find_encoder(AV_CODEC_ID_AV1);
// codec = avcodec_find_encoder(AV_CODEC_ID_H265);
if (!codec) {
fprintf(stderr, "Codec '%s' not found\n", codec_name);
exit(1);
}
codecContext = avcodec_alloc_context3(codec);
if (!codecContext) {
fprintf(stderr, "Could not allocate video codec context\n");
exit(1);
}
/* Allocate the output context */
outFormatCtx = avformat_alloc_context();
if (!outFormatCtx) {
fprintf(stderr, "Could not allocate output context\n");
exit(1);
}
/* Open the RTMP output */
const AVOutputFormat *ofmt = av_guess_format("flv", NULL, NULL);
// const AVOutputFormat *ofmt = av_guess_format("MKV", NULL, NULL);
// const AVOutputFormat *ofmt = av_guess_format("rtmp", NULL, NULL);
// const AVOutputFormat *ofmt = av_guess_format("mpegts", NULL, NULL);
// const AVOutputFormat *ofmt = av_guess_format("mp4", NULL, NULL);
if (!ofmt) {
fprintf(stderr, "Could not find output format\n");
exit(1);
}
outFormatCtx->oformat = ofmt;
outFormatCtx->url = av_strdup(rtmp_url);
/* Add a video stream */
st = avformat_new_stream(outFormatCtx, codec);
if (!st) {
fprintf(stderr, "Could not allocate stream\n");
exit(1);
}
st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
st->codecpar->codec_id = codec->id;
st->codecpar->width = 352;
st->codecpar->height = 288;
// st->codecpar = c;
// st->codecpar->format = AV_PIX_FMT_YUV420P;
// Set video stream parameters
// st->codecpar->framerate = (AVRational){25, 1};
/* Set the output URL */
av_dict_set(&outFormatCtx->metadata, "url", rtmp_url, 0);
pkt = av_packet_alloc();
if (!pkt)
exit(1);
/* ... (rest of the setup code) ... */
/* put sample parameters */
codecContext->bit_rate = 400000;
/* resolution must be a multiple of two */
codecContext->width = 352;
codecContext->height = 288;
/* frames per second */
codecContext->time_base = (AVRational) {1, 25};
codecContext->framerate = (AVRational) {25, 1};
/* emit one intra frame every ten frames
* check frame pict_type before passing frame
* to encoder, if frame->pict_type is AV_PICTURE_TYPE_I
* then gop_size is ignored and the output of encoder
* will always be I frame irrespective to gop_size
*/
codecContext->gop_size = 10;
codecContext->max_b_frames = 1;
codecContext->pix_fmt = AV_PIX_FMT_YUV420P;
if (codec->id == AV_CODEC_ID_H264)
av_opt_set(codecContext->priv_data, "preset", "slow", 0);
/* open it */
ret = avcodec_open2(codecContext, codec, NULL);
if (ret < 0) {
fprintf(stderr, "Could not open codec: %s\n", av_err2str(ret));
exit(1);
}
avcodec_parameters_to_context(codecContext, st->codecpar);
if (avio_open(&outFormatCtx->pb, rtmp_url, AVIO_FLAG_WRITE)) {
fprintf(stderr, "Could not open output\n");
exit(1);
}
/* Write the header */
if (avformat_write_header(outFormatCtx, NULL) != 0) {
fprintf(stderr, "Error occurred when opening output\n");
exit(1);
}
frame = av_frame_alloc();
if (!frame) {
fprintf(stderr, "Could not allocate video frame\n");
exit(1);
}
// frame->format = c->pix_fmt;
// frame->format = AV_PIX_FMT_YUV420P;
frame->format = 0;
frame->width = codecContext->width;
frame->height = codecContext->height;
ret = av_frame_get_buffer(frame, 0);
if (ret < 0) {
fprintf(stderr, "Could not allocate the video frame data ,%s\n", av_err2str(ret));
exit(1);
}
/* encode 1 second of video */
for (i = 0; i < 2500; i++) {
/* ... (rest of the encoding loop) ... */
fflush(stdout);
/* make sure the frame data is writable */
ret = av_frame_make_writable(frame);
if (ret < 0)
exit(1);
/* prepare a dummy image */
/* Y */
for (y = 0; y < codecContext->height; y++) {
for (x = 0; x < codecContext->width; x++) {
frame->data[0][y * frame->linesize[0] + x] = x + y + i * 3;
}
}
/* Cb and Cr */
for (y = 0; y < codecContext->height / 2; y++) {
for (x = 0; x < codecContext->width / 2; x++) {
frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2;
frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5;
}
}
frame->pts = i;
/* encode the image */
encode(codecContext, frame, pkt, outFormatCtx);
}
/* flush the encoder */
encode(codecContext, NULL, pkt, outFormatCtx);
/* Write the trailer */
av_write_trailer(outFormatCtx);
/* Close the output */
avformat_free_context(outFormatCtx);
avcodec_free_context(&codecContext);
av_frame_free(&frame);
av_packet_free(&pkt);
return 0;
}
我怀疑是我自己编译的 FFmpeg 库有问题,于是在 GitHub 上找了一套编译 FFmpeg 的步骤重新编译,但编译出来的库仍然有同样的问题,我现在不知道该怎么办。
1条答案
按热度按时间dm7nw8vv1#
我已经解决了这个问题,在这里解释一下,以防别人遇到同样的情况。首先,两边并不完全等价:Win11 上我用的是 FFmpeg 5.1.2 动态库,而 Android 上是我自己编译的 FFmpeg 6.1,所以一个能正常运行、另一个不能。其次,Android 上报错的直接原因是我有一个参数没有设置,只需要补上下面这一行。
outStream->codecpar->format = AV_PIX_FMT_YUV420P;
添加上述参数后,它将在Android FFmpeg6.1上准备就绪。