一个快乐的野指针 @ 2023-04-23 09:14 · acceptance rate: 100%
64 views
Closed

Using the FFmpeg API

This is a program written with the FFmpeg API. It has a memory leak: after recording video for a while, the program crashes. I am not very familiar with the API yet. Could someone explain what is wrong with the program and how it should be fixed?

#include "ffmpegmananger.h"
#include <QThread>

const char *filter_descr = "drawtext=fontfile=simhei.ttf: text='Video add text by Dione.':x=80:y=20:fontsize=24:fontcolor=red:shadowy=2";
#define P_USE_FILTER 1

AVFilterContext *buffersink_ctx;
AVFilterContext *buffersrc_ctx;
const AVFilter *buffersrc;
const AVFilter *buffersink;

static int initFilters(AVFormatContext *ifmt_ctx)
{
    int ret;
    char args[512];
    AVFilterGraph *filter_graph = NULL;

    // initialize the filters
    avfilter_register_all();
    buffersrc = avfilter_get_by_name("buffer");
    buffersink = avfilter_get_by_name("buffersink");

    AVFilterInOut *outputs = avfilter_inout_alloc();
    if (!outputs)
    {
        printf("Cannot alloc output\n");
        return -1;
    }
    AVFilterInOut *inputs = avfilter_inout_alloc();
    if (!inputs)
    {
        printf("Cannot alloc input\n");
        return -1;
    }

    if (filter_graph)
        avfilter_graph_free(&filter_graph);
    filter_graph = avfilter_graph_alloc();
    if (!filter_graph)
    {
        printf("Cannot create filter graph\n");
        return -1;
    }

    /* buffer video source: the decoded frames from the decoder will be inserted here. */
    snprintf(args, sizeof(args),
        "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
        ifmt_ctx->streams[0]->codec->width, ifmt_ctx->streams[0]->codec->height, ifmt_ctx->streams[0]->codec->pix_fmt,
        ifmt_ctx->streams[0]->time_base.num, ifmt_ctx->streams[0]->time_base.den,
        ifmt_ctx->streams[0]->codec->sample_aspect_ratio.num, ifmt_ctx->streams[0]->codec->sample_aspect_ratio.den);


    ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
        args, NULL, filter_graph);
    if (ret < 0) {
        printf("Cannot create buffer source\n");
        return ret;
    }

    /* buffer video sink: to terminate the filter chain. */
    ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
        NULL, NULL, filter_graph);
    if (ret < 0) {
        printf("Cannot create buffer sink\n");
        return ret;
    }

    /* Endpoints for the filter graph. */
    outputs->name = av_strdup("in");
    outputs->filter_ctx = buffersrc_ctx;
    outputs->pad_idx = 0;
    outputs->next = NULL;

    inputs->name = av_strdup("out");
    inputs->filter_ctx = buffersink_ctx;
    inputs->pad_idx = 0;
    inputs->next = NULL;

    if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_descr,
        &inputs, &outputs, NULL)) < 0)
        return ret;

    if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
        return ret;

    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);

    return 0;
}



ffmpegMananger::ffmpegMananger(QObject *parent ):
    QObject(parent)
{
    m_pInFmtCtx = nullptr;
    m_pTsFmtCtx  = nullptr;
    m_qstrRtspURL = "";
    m_qstrOutPutFile = "";
}
ffmpegMananger::~ffmpegMananger()
{
    avformat_free_context(m_pInFmtCtx);
    avformat_free_context(m_pTsFmtCtx);
}

void ffmpegMananger::getRtspURL(QString strRtspURL)
{
    this->m_qstrRtspURL = strRtspURL;
}
void ffmpegMananger::getOutURL(QString strRute)
{
    this->m_qstrOutPutFile = strRute;
    printf("===========%s\n",m_qstrOutPutFile.toStdString().c_str());
}
void ffmpegMananger::setOutputCtx(AVCodecContext *encCtx, AVFormatContext **pTsFmtCtx,int &nVideoIdx_out)
{
    avformat_alloc_output_context2(pTsFmtCtx , nullptr, nullptr, m_qstrOutPutFile.toStdString().c_str());
    if (!pTsFmtCtx ) {
        printf("Could not create output context\n");
        return;
    }
    if (avio_open(&((*pTsFmtCtx)->pb), m_qstrOutPutFile.toStdString().c_str(), AVIO_FLAG_READ_WRITE) < 0)
    {
       avformat_free_context(*pTsFmtCtx);
       printf("avio_open fail.");
       return;
    }
    AVStream *out_stream = avformat_new_stream(*pTsFmtCtx, encCtx->codec);
    nVideoIdx_out = out_stream->index;
    //nVideoIdx_out = out_stream->index;
    avcodec_parameters_from_context(out_stream->codecpar, encCtx);
    printf("==========Output Information==========\n");
    av_dump_format(*pTsFmtCtx, 0, m_qstrOutPutFile.toStdString().c_str(), 1);
    printf("======================================\n");
}
int ffmpegMananger::ffmepgInput()
{
    int nRet = 0;
    AVCodecContext *encCtx = nullptr;// encoder
    //const char *pUrl = "D:/videos/264.dat";
    std::string temp = m_qstrRtspURL.toStdString();
    const char *pUrl = temp.c_str();
    printf("===========%s\n",pUrl);
    AVDictionary *options = nullptr;
    av_dict_set(&options,"rtsp_transport", "tcp", 0);//设置为rtsp传输模式
    av_dict_set(&options,"stimeout","10000000",0);//在RTSP协议中,客户端发送请求后,需要等待服务器的响应,如果在超时时间(这里设置为10s)内未收到响应,客户端会重新发起请求。
    // 设置“buffer_size”缓存容量
    av_dict_set(&options, "buffer_size", "1024000", 0);//设置视频的缓冲器大小
    nRet = avformat_open_input(&m_pInFmtCtx,pUrl,nullptr,&options);//打开视频流,在成功打开输入文件后,此指针(m_pInFmtCtx)将指向被创建的AVFormatContext对象。
    if( nRet < 0)//成功打开返回0,失败返回负值
    {
        printf("Could not open input file,===========keep trying \n");
        return nRet;
    }
    avformat_find_stream_info(m_pInFmtCtx, nullptr);// fill m_pInFmtCtx with the basic stream information; no options are needed, so the second argument is nullptr
    printf("===========Input Information==========\n");
    av_dump_format(m_pInFmtCtx, 0, pUrl, 0);// print the stream information (optional)
    printf("======================================\n");
    //1. find the index of the video stream
    int nVideo_indx = av_find_best_stream(m_pInFmtCtx,AVMEDIA_TYPE_VIDEO,-1,-1,nullptr,0);// pick the best video stream
    if(nVideo_indx < 0)
    {
        avformat_free_context(m_pInFmtCtx);
        printf("Failed to find the decoder\n");
        return -1;
    }
    //2. find the decoder
    AVCodec *pInCodec = avcodec_find_decoder(m_pInFmtCtx->streams[nVideo_indx]->codecpar->codec_id);
    if(nullptr == pInCodec)
    {
        printf("avcodec_find_decoder fail.");
        return -1;
    }
    // allocate the decoder context
    AVCodecContext* pInCodecCtx = avcodec_alloc_context3(pInCodec);
    // copy the decoder parameters into it
    nRet = avcodec_parameters_to_context(pInCodecCtx, m_pInFmtCtx->streams[nVideo_indx]->codecpar);
    if(nRet < 0)
    {

        avcodec_free_context(&pInCodecCtx);
        printf("avcodec_parameters_to_context fail.");
        return -1;
    }
    // open the decoder
    if(avcodec_open2(pInCodecCtx, pInCodec, nullptr) < 0)
    {
        avcodec_free_context(&pInCodecCtx);
        printf("Error: Can't open codec!\n");
        return -1;
    }
    printf("width = %d\n", pInCodecCtx->width);
    printf("height = %d\n", pInCodecCtx->height);

//    videoWidth = 1920;
//    videoHeight = 1080;


    int frame_index = 0;
    int got_picture = 0;
    AVStream *in_stream =nullptr;// input stream
    AVStream *out_stream =nullptr;
    AVFrame *pFrame= av_frame_alloc();// input/output frame
    AVPacket *newpkt = av_packet_alloc();
    AVPacket *packet = av_packet_alloc();
    av_init_packet(newpkt);
    av_init_packet(packet);
    // alloc AVFrame
    AVFrame*pFrameRGB = av_frame_alloc();
    // color-space conversion, scaling and image filtering
    SwsContext *m_SwsContext = sws_getContext(pInCodecCtx->width, pInCodecCtx->height,
            pInCodecCtx->pix_fmt, pInCodecCtx->width, pInCodecCtx->height,
            AV_PIX_FMT_RGB32, SWS_BICUBIC, nullptr, nullptr, nullptr);

    int bytes = av_image_get_buffer_size(AV_PIX_FMT_RGB32, pInCodecCtx->width, pInCodecCtx->height,4);
    uint8_t *m_OutBuffer = (uint8_t *)av_malloc(bytes * sizeof(uint8_t));

    // hand the allocated buffer over to pFrameRGB
    avpicture_fill((AVPicture *)pFrameRGB, m_OutBuffer, AV_PIX_FMT_RGB32, pInCodecCtx->width, pInCodecCtx->height);
    if(encCtx == nullptr)
    {
        // open the encoder
        openEncoder(pInCodecCtx->width, pInCodecCtx->height,&encCtx);
    }
    int videoindex_out = 0;
    // set up the output file context
    setOutputCtx(encCtx,&m_pTsFmtCtx,videoindex_out);
    //Write file header
    if (avformat_write_header(m_pTsFmtCtx, nullptr) < 0)
    {
        avformat_free_context(m_pTsFmtCtx);
        printf("Error occurred when opening output file\n");
        return -1;
    }
    printf("==============writer trail===================.\n");
    int count = 0;
    nRet = 0;


    //initFilter();
    //---------------------------------------------------------------------------- playback ----------------------------------------------------------------------------
    while(av_read_frame(m_pInFmtCtx, packet) >= 0)// read H.264 data from m_pInFmtCtx into packet
    {
        //initFilter();
        if(packet->stream_index != nVideo_indx)// keep only the video stream
        {
            continue;
        }
        if(avcodec_send_packet(pInCodecCtx, packet)<0)// feed the H.264 data in packet to the decoder; the decoded YUV data stays inside pInCodecCtx
        {
           break;
        }
        av_packet_unref(packet);
        got_picture = avcodec_receive_frame(pInCodecCtx, pFrame);// copy the decoded YUV data into pFrame



        if(0 == got_picture)// one frame has been decoded
        {
            initFilters(m_pInFmtCtx);// call the filter initialization function

          #if P_USE_FILTER
            pFrame->pts = av_frame_get_best_effort_timestamp(pFrame);
            if (av_buffersrc_add_frame(buffersrc_ctx, pFrame) < 0) {
                qDebug("Error while feeding the filtergraph\n");
                break;
            }
            while (1) {
                int ret = av_buffersink_get_frame_flags(buffersink_ctx, pFrame, 0);
                if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                    break;
                if (ret < 0)
                    return -1;
                if(pFrame)
                {
                    sws_scale(m_SwsContext,(uint8_t const*const*)pFrame->data,pFrame->linesize,0,pInCodecCtx->height,pFrameRGB->data,pFrameRGB->linesize);
                    // convert to QImage
                    QImage tmmImage((uchar *)m_OutBuffer, pInCodecCtx->width, pInCodecCtx->height, QImage::Format_RGB32);
                    QImage image = tmmImage.copy();
                    emit Sig_GetOneFrame(image);
                }
                //av_frame_unref(pFrame);
            }



          #else

            // emit the signal that displays the image
            // scale and convert the decoded video frame
            sws_scale(m_SwsContext, (uint8_t const * const *)pFrame->data,
                     pFrame->linesize, 0, pInCodecCtx->height,
                     pFrameRGB->data, pFrameRGB->linesize);

            // convert to QImage
            QImage tmmImage((uchar *)m_OutBuffer, pInCodecCtx->width, pInCodecCtx->height, QImage::Format_RGB32);
            QImage image = tmmImage.copy();

            // emit the QImage
            emit Sig_GetOneFrame(image);
            #endif

            setDecoderPts(newpkt->stream_index,count, pFrame);
            count++;
            // send the raw frame to the encoder
            nRet = avcodec_send_frame(encCtx,pFrame);
            if(nRet < 0)
            {
                continue;
            }
            // fetch the encoded packets from the encoder
            while(nRet >= 0)
            {
                nRet = avcodec_receive_packet(encCtx,newpkt);
                if(nRet < 0)
                {
                    break;
                }
                setEncoderPts(nVideo_indx,frame_index,videoindex_out,newpkt);
                int _count = 1;
                printf("Write %d Packet. size:%5d\tpts:%lld\n", _count,newpkt->size, newpkt->pts);

                if (av_interleaved_write_frame(m_pTsFmtCtx, newpkt) < 0)
                {
                    printf("Error muxing packet\n");
                    goto end;
                }
                _count++;
                av_packet_unref(newpkt);
            }
        }
    }


    //----------------------------------------------------------------------- save ----------------------------------------------------------------------------------
    while(1)// read H.264 data from m_pInFmtCtx into packet
    {
        if(packet->stream_index != nVideo_indx)// keep only the video stream
        {
            continue;
        }
        if(avcodec_send_packet(pInCodecCtx, packet)<0)// feed the H.264 data in packet to the decoder; the decoded YUV data stays inside pInCodecCtx
        {
            continue;
        }
        av_packet_unref(packet);
        got_picture = avcodec_receive_frame(pInCodecCtx, pFrame);// copy the decoded YUV data into pFrame
        if(!got_picture)// one frame has been decoded
        {
            AVRational in_time_base1 = in_stream->time_base;
            in_stream = m_pInFmtCtx->streams[newpkt->stream_index];

            //Duration between 2 frames (us)
            int64_t in_duration = (double)AV_TIME_BASE / av_q2d(in_stream->r_frame_rate);
            pFrame->pts = (double)(count*in_duration) / (double)(av_q2d(in_time_base1)*AV_TIME_BASE);
            count++;
            // send the raw frame to the encoder
            nRet = avcodec_send_frame(encCtx,pFrame);
            if(nRet < 0)
            {
                break;
            }
            // fetch the encoded packets from the encoder
            while(nRet >= 0)
            {
                nRet = avcodec_receive_packet(encCtx,newpkt);
                if(nRet < 0)
                {
                    continue;
                }
                in_stream = m_pInFmtCtx->streams[newpkt->stream_index];
                out_stream = m_pTsFmtCtx->streams[videoindex_out];
                if (newpkt->stream_index == nVideo_indx)
                {
                    //FIX:No PTS (Example: Raw H.264)
                    //Simple Write PTS
                    if (newpkt->pts == AV_NOPTS_VALUE)
                    {
                        //Write PTS
                        AVRational time_base1 = in_stream->time_base;
                        //Duration between 2 frames (us)
                        int64_t calc_duration = (double)AV_TIME_BASE / av_q2d(in_stream->r_frame_rate);
                        //Parameters
                        newpkt->pts = (double)(frame_index*calc_duration) / (double)(av_q2d(time_base1)*AV_TIME_BASE);
                        newpkt->dts = newpkt->pts;
                        newpkt->duration = (double)calc_duration / (double)(av_q2d(time_base1)*AV_TIME_BASE);
                        frame_index++;
                    }
                 }
                //Convert PTS/DTS
                newpkt->pts = av_rescale_q_rnd(newpkt->pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
                newpkt->dts = av_rescale_q_rnd(newpkt->dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
                newpkt->duration = av_rescale_q(newpkt->duration, in_stream->time_base, out_stream->time_base);
                newpkt->pos = -1;
                newpkt->stream_index = videoindex_out;
                int count = 1;
                printf("Write %d Packet. size:%5d\tpts:%lld\n", count,newpkt->size, newpkt->pts);

                if (av_interleaved_write_frame(m_pTsFmtCtx, newpkt) < 0)
                {
                    printf("Error muxing packet\n");
                    goto end;
                }
                count++;
                av_packet_unref(newpkt);
            }
        }
    }
    //Write file trailer
    av_write_trailer(m_pTsFmtCtx);
end:
    av_frame_free(&pFrame);
    av_frame_free(&pFrameRGB);
    av_packet_unref(newpkt);
    av_packet_unref(packet);
    std::cout<<"rtsp's h264 to ts end";  
    return  0;
}
void ffmpegMananger::setDecoderPts(int idx,int count,AVFrame *pFrame)
{
    AVStream* in_stream = m_pInFmtCtx->streams[idx];
    AVRational in_time_base1 = in_stream->time_base;
    //Duration between 2 frames (us)
    int64_t in_duration = (double)AV_TIME_BASE / av_q2d(in_stream->r_frame_rate);
    pFrame->pts = (double)(count*in_duration) / (double)(av_q2d(in_time_base1)*AV_TIME_BASE);
}
void ffmpegMananger::setEncoderPts(int nVideo_indx,int frame_index,int videoindex_out,AVPacket *newpkt)
{
    AVStream*in_stream = m_pInFmtCtx->streams[newpkt->stream_index];
    AVStream*out_stream = m_pTsFmtCtx->streams[videoindex_out];
    if (newpkt->stream_index == nVideo_indx)
    {
        //FIX:No PTS (Example: Raw H.264)
        //Simple Write PTS
        if (newpkt->pts == AV_NOPTS_VALUE)
        {
            //Write PTS
            AVRational time_base1 = in_stream->time_base;
            //Duration between 2 frames (us)
            int64_t calc_duration = (double)AV_TIME_BASE / av_q2d(in_stream->r_frame_rate);
            //Parameters
            newpkt->pts = (double)(frame_index*calc_duration) / (double)(av_q2d(time_base1)*AV_TIME_BASE);
            newpkt->dts = newpkt->pts;
            newpkt->duration = (double)calc_duration / (double)(av_q2d(time_base1)*AV_TIME_BASE);
            frame_index++;
        }
     }
    //Convert PTS/DTS
    newpkt->pts = av_rescale_q_rnd(newpkt->pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
    newpkt->dts = av_rescale_q_rnd(newpkt->dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
    newpkt->duration = av_rescale_q(newpkt->duration, in_stream->time_base, out_stream->time_base);
    newpkt->pos = -1;
    newpkt->stream_index = videoindex_out;
}


void ffmpegMananger::writeTail()
{
    //Write file trailer
    av_write_trailer(m_pTsFmtCtx);
}
void ffmpegMananger::openEncoder(int width, int height, AVCodecContext** enc_ctx)
{
    // use the libx264 encoder
    AVCodec * pCodec = avcodec_find_encoder_by_name("libx264");
    if(nullptr == pCodec)
    {
        printf("avcodec_find_encoder_by_name fail.\n");
        return;
    }
    // allocate the encoder context
    *enc_ctx = avcodec_alloc_context3(pCodec);
    if(nullptr == enc_ctx)
    {
        printf("avcodec_alloc_context3(pCodec) fail.\n");
        return;
    }
    //sps/pps
    (*enc_ctx)->profile = FF_PROFILE_H264_MAIN;
    (*enc_ctx)->level = 30;// H.264 level 3.0
    // resolution
    (*enc_ctx)->width = width;
    (*enc_ctx)->height = height;
    //gop
    (*enc_ctx)->gop_size = 25;// I-frame interval
    (*enc_ctx)->keyint_min = 20;// minimum interval between automatically inserted I-frames. OPTION
    // B-frames
    (*enc_ctx)->max_b_frames = 0;// no B-frames
    (*enc_ctx)->has_b_frames = 0;//
    // reference frames
    (*enc_ctx)->refs = 3;//OPTION
    // input YUV pixel format
    (*enc_ctx)->pix_fmt = AV_PIX_FMT_YUV420P;
    // bit rate
    (*enc_ctx)->bit_rate = 3000000;
    // frame rate
    (*enc_ctx)->time_base = (AVRational){1,25};// time base: interval between two frames
    (*enc_ctx)->framerate = (AVRational){25,1};// frame rate: 25 frames per second
    if(avcodec_open2((*enc_ctx),pCodec,nullptr) < 0)
    {
        printf("avcodec_open2 fail.\n");
    }
    return;
}



2 answers

  • callinglove 2023-04-23 10:24
    In ffmpegMananger::ffmepgInput:
        After avformat_open_input, call av_dict_free(&options); do this whether the open succeeds or not.
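
        A minimal sketch of that change, reusing the variable names from the question code:

            nRet = avformat_open_input(&m_pInFmtCtx, pUrl, nullptr, &options);
            av_dict_free(&options);   // the dictionary is no longer needed, whatever the result was
            if (nRet < 0)
            {
                printf("Could not open input file,===========keep trying \n");
                return nRet;
            }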
    
        If avformat_write_header fails, every resource allocated up to that point has to be released one by one:
        av_frame_free(&pFrame);
        av_frame_free(&pFrameRGB);
        av_packet_free(&newpkt);
        av_packet_free(&packet);
        avcodec_free_context(&pInCodecCtx);
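
        A sketch of that error path (the last three frees go beyond the list above; encCtx, m_SwsContext and
        m_OutBuffer are assumed to have already been created at this point in ffmepgInput):

            if (avformat_write_header(m_pTsFmtCtx, nullptr) < 0)
            {
                printf("Error occurred when opening output file\n");
                av_frame_free(&pFrame);
                av_frame_free(&pFrameRGB);
                av_packet_free(&newpkt);
                av_packet_free(&packet);
                avcodec_free_context(&pInCodecCtx);
                avcodec_free_context(&encCtx);    // encoder context opened by openEncoder
                sws_freeContext(m_SwsContext);    // scaler created a few lines earlier
                av_freep(&m_OutBuffer);           // RGB buffer from av_malloc
                return -1;
            }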
    
        m_pInFmtCtx and m_pTsFmtCtx are released in the destructor, so they can be left alone on the error paths.
        
        Whenever av_read_frame succeeds, the packet has to be released no matter whether it belongs to the stream you want: add
        av_packet_unref(packet)
        both when packet->stream_index != nVideo_indx and when the avcodec_send_packet call fails.
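
        A sketch of the top of the read loop with those two calls added:

            while (av_read_frame(m_pInFmtCtx, packet) >= 0)
            {
                if (packet->stream_index != nVideo_indx)      // not the video stream
                {
                    av_packet_unref(packet);                  // release it before skipping
                    continue;
                }
                if (avcodec_send_packet(pInCodecCtx, packet) < 0)
                {
                    av_packet_unref(packet);                  // release it before leaving the loop
                    break;
                }
                av_packet_unref(packet);                      // the existing unref after a successful send
                // ... rest of the loop unchanged ...
            }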
        
        And why is pFrame never released with av_frame_unref after it has been used?
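
        One place it could go, as a sketch, is the very end of each iteration of the read loop, once the frame
        has been displayed and handed to the encoder (this assumes pFrame is not needed again in that iteration):

            // last statement inside the if(0 == got_picture) block
            av_frame_unref(pFrame);   // drop the buffers referenced by pFrame before the next iteration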
    
        The cleanup at the end of the function is not thorough enough either; it should at least include
        av_packet_free(&newpkt);
        av_packet_free(&packet);
        avcodec_free_context(&pInCodecCtx);
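
        Put together, the end label could look roughly like this (a sketch; freeing encCtx, m_SwsContext and
        m_OutBuffer again goes beyond the list above, but they are also allocated inside this function):

        end:
            av_frame_free(&pFrame);
            av_frame_free(&pFrameRGB);
            av_packet_free(&newpkt);             // frees the packet structure itself, not just its payload
            av_packet_free(&packet);
            avcodec_free_context(&pInCodecCtx);
            avcodec_free_context(&encCtx);
            sws_freeContext(m_SwsContext);
            av_freep(&m_OutBuffer);
            std::cout<<"rtsp's h264 to ts end";
            return  0;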
    

    It is also worth studying the official FFmpeg examples: https://gitee.com/mirrors/ffmpeg/tree/master/doc/examples

    This answer was selected by the asker as the best answer.


Question timeline

  • Closed by the system on May 1
  • Answer accepted on April 23
  • Added a ¥15 bounty on April 23
  • Question edited on April 23
