weixin_36350180 2019-10-17 22:57 采纳率: 0%
浏览 509

FFmpeg C++ 的 avcodec_encode_video2 返回 -40

我想通過 avcodec_encode_video2 覆蓋 packet 中的 memory,來刷新我做過特殊渲染的影像,但 avcodec_encode_video2 的返回值是 -40(失敗)。問題:我正常播放、輸出都沒問題,就是沒辦法蓋過圖層。是不是哪邊 memory 的寫法不正確呢?

代碼如下:

// NOTE(review): this is the interior of a void function (its signature is not
// visible in this paste). It opens a local MP4, copies its streams to an FLV
// muxer pointed at an RTMP URL, decodes video frames, converts them to RGB24,
// overlays custom rendering via CopyDate(), and tries to re-encode the frame
// back into the packet before muxing.
//
// NOTE(review): root cause of the reported avcodec_encode_video2() == -40:
// on Windows/MSVC, ENOSYS == 40, so -40 is AVERROR(ENOSYS) ("function not
// implemented"). pCodecCtx below is opened with a *decoder*
// (avcodec_find_decoder + avcodec_open2), so it has no encode callback, and
// calling avcodec_encode_video2() on it fails. The fix is to create and open a
// separate encoder context (avcodec_find_encoder(AV_CODEC_ID_H264), own
// AVCodecContext) and feed it frames in the encoder's pixel format
// (typically AV_PIX_FMT_YUV420P) — pFrameRGB here is RGB24, which H.264
// encoders do not accept. -- TODO confirm the platform; on Linux ENOSYS is 38.
//
// NOTE(review): this code uses the pre-FFmpeg-4.0 API throughout:
// av_register_all(), AVStream::codec, avcodec_copy_context(), avpicture_*,
// avcodec_decode_video2(), avcodec_encode_video2() and av_free_packet() are
// all deprecated or removed in current FFmpeg. Modern equivalents:
// avcodec_parameters_copy(), av_image_get_buffer_size()/av_image_fill_arrays(),
// avcodec_send_packet()/avcodec_receive_frame(),
// avcodec_send_frame()/avcodec_receive_packet(), av_packet_unref().
AVOutputFormat* ofmt = NULL;
//Input AVFormatContext and Output AVFormatContext
AVFormatContext* i_pFormatCtx = NULL, * out_pFormatCtx = NULL;
AVCodecContext* pCodecCtx;// video decoder context (borrowed from the input stream; not owned)
AVCodecContext* pCodecCtxAudio;// audio decoder context (borrowed from the input stream; not owned)
AVCodec* pCodec;
AVCodec* pCodecAudio;
AVPacket packet;
string in_filename;
string out_filename;
int ret, i;
int videoindex = -1;
int audioindex = -1;
int frame_index = 0;
int64_t start_time = 0;
uint8_t* buffer;
AVFrame* pFrame;
AVFrame* pFrameRGB;
int frameFinished;
int frameAudioFinished;
int numBytes;
AVStream* in_stream, * out_stream;
vector<AVPacket> BIGpacket; // NOTE(review): declared but never used in this fragment
in_filename = "D:/yolo/data_movie/f1.mp4";
out_filename = "rtmp://localhost:1935/live/home";// Output URL (RTMP) — original comment was mojibake
//================
// Register all muxers/demuxers/codecs (deprecated no-op since FFmpeg 4.0):
av_register_all();
//================
//Network
avformat_network_init();
//=======================
//Input
if ((ret = avformat_open_input(&i_pFormatCtx, in_filename.c_str(), 0, 0)) < 0) 
{
    //printf("Could not open input file.");
    goto end;
}
if ((ret = avformat_find_stream_info(i_pFormatCtx, 0)) < 0) {
    //printf("Failed to retrieve input stream information");
    goto end;
}

// Locate the (last) video and audio stream indices.
// NOTE(review): av_find_best_stream() would be the idiomatic way; also, if the
// file has no audio stream, audioindex stays -1 and the audio-decoder setup
// below dereferences streams[-1].
for (i = 0; i < i_pFormatCtx->nb_streams; i++)
{
    if (i_pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) 
    {
        videoindex = i;
    }
    if (i_pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
    {
        audioindex = i;
    }
}
av_dump_format(i_pFormatCtx, 0, in_filename.c_str(), 0);

//Output

avformat_alloc_output_context2(&out_pFormatCtx, NULL, "flv", out_filename.c_str()); //RTMP
if (!out_pFormatCtx)
{
    //printf("Could not create output context\n");
    ret = AVERROR_UNKNOWN;
    goto end;
}
ofmt = out_pFormatCtx->oformat;

// Mirror every input stream on the output context (stream-copy setup).
for (i = 0; i < i_pFormatCtx->nb_streams; i++)
{
    //Create output AVStream according to input AVStream
    AVStream* in_stream = i_pFormatCtx->streams[i];
    AVStream* out_stream = avformat_new_stream(out_pFormatCtx, in_stream->codec->codec);
    if (!out_stream)
    {
        AfxMessageBox(L"Failed allocating output stream");
        //printf("Failed allocating output stream\n");
        ret = AVERROR_UNKNOWN;
        goto end;
    }
    //Copy the settings of AVCodecContext
    ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
    if (ret < 0)
    {
        //printf("Failed to copy context from input to output stream codec context\n");
        goto end;
    }
    // FLV chooses its own tags; a stale MP4 codec_tag would make the muxer fail.
    out_stream->codec->codec_tag = 0;
    if (out_pFormatCtx->oformat->flags & AVFMT_GLOBALHEADER)
        out_stream->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}

//======================================================================
// Open the video DECODER.
// NOTE(review): this same pCodecCtx is later passed to avcodec_encode_video2();
// a context opened with a decoder cannot encode — that is the -40 failure.
pCodecCtx = i_pFormatCtx->streams[videoindex]->codec;
pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
if (avcodec_open2(pCodecCtx, pCodec, 0) < 0)
{
    DbgPrint("Could not open codec");
    return;
}
//======================================================================
// Open the audio decoder.
// NOTE(review): audioindex may still be -1 here (input without audio) — verify.

pCodecCtxAudio= i_pFormatCtx->streams[audioindex]->codec;
pCodecAudio= avcodec_find_decoder(pCodecCtxAudio->codec_id);
if (avcodec_open2(pCodecCtxAudio, pCodecAudio, 0) < 0)
{
    DbgPrint("Could not open codec");
    return;
}

// NOTE(review): the trailing "**" after #endif below looks like a markdown
// paste artifact and is not valid preprocessor syntax — remove it in the real
// source. pFile is presumably declared elsewhere in the enclosing scope.
#if OUTPUT_PCM
pFile = fopen("output.pcm", "wb");
#endif**

// NOTE(review): this av_malloc'd AVPacket is copied by value and the heap
// allocation leaks immediately; `av_init_packet(&packet);` on the stack
// variable alone is sufficient.
packet = *(AVPacket*)av_malloc(sizeof(AVPacket));
av_init_packet(&packet);


//=====================================================================
// Work around bogus frame rates that some codecs report.
if (pCodecCtx->time_base.num > 1000 && pCodecCtx->time_base.den == 1)
{
    pCodecCtx->time_base.den = 1000;
}
// Allocate the video frames.
pFrame = av_frame_alloc();  // Allocate an AVFrame structure
pFrameRGB = av_frame_alloc();
if (pFrameRGB == NULL)
    return;
numBytes = avpicture_get_size(AV_PIX_FMT_RGB24, pCodecCtx->width,pCodecCtx->height);// Determine required buffer size and allocate buffer
buffer = (uint8_t*)av_malloc(numBytes * sizeof(uint8_t));
avpicture_fill((AVPicture*)pFrameRGB, buffer, AV_PIX_FMT_RGB24,pCodecCtx->width, pCodecCtx->height);// Assign appropriate parts of buffer to image planes in pFrameRGB
long prepts = 0;
//Dump Format------------------
av_dump_format(out_pFormatCtx, 0, out_filename.c_str(), 1);
//Open output URL
if (!(ofmt->flags & AVFMT_NOFILE))
{
    ret = avio_open(&out_pFormatCtx->pb, out_filename.c_str(), AVIO_FLAG_WRITE);
    if (ret < 0)
    {
        AfxMessageBox(L"Could not open output URL");
        //printf("Could not open output URL '%s'", out_filename);
        goto end;
    }
}
//Write file header
ret = avformat_write_header(out_pFormatCtx, NULL);
if (ret < 0)
{
    //printf("Error occurred when opening output URL\n");
    goto end;
}
start_time = av_gettime();
// Main demux/decode/overlay/mux loop.
while (1)
{
    //Get an AVPacket
    ret = av_read_frame(i_pFormatCtx, &packet);
    if (ret < 0)
    {
        break;
    }
    if (G_PAUSE)
    {
        break;
    }

    /*
    if (packet.stream_index == audioindex)
    {
        // audio:
        ret = avcodec_decode_audio4(pCodecCtx, pFrame, &frameAudioFinished,&packet);
    }
    */
    //Important:Delay
    if (packet.stream_index == videoindex)
    {
        //====================================================================================
        // Decode one video frame:
        int a=avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);// Decode video frame
        //ret = avcodec_encode_video2(pCodecCtx, &packet, pFrameRGB, &frameFinished);
        if (frameFinished)
        {
            // Lazily build the YUV->RGB24 converter on the first decoded frame.
            static struct SwsContext* img_convert_ctx;
            if (img_convert_ctx == NULL)
            {
                int w = pCodecCtx->width;
                int h = pCodecCtx->height;

                // Flag value 4 is SWS_BICUBIC.
                img_convert_ctx = sws_getContext(w, h,
                    pCodecCtx->pix_fmt,
                    w, h, AV_PIX_FMT_RGB24, 4,
                    NULL, NULL, NULL);
                if (img_convert_ctx == NULL) 
                {
                    fprintf(stderr, "Cannot initialize the conversion context!\n");
                    exit(1);
                }
            }
            int ret = sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize);
            if (ret == 0)
            {
                fprintf(stderr, "SWS_Scale failed [%d]!\n", ret);
                continue;
            }
            // Save the frame to disk
            // NOTE(review): `i` here is the leftover stream-index loop counter,
            // not a frame counter — presumably intended to dump the first few
            // frames; verify.
            if (i++ <= 5)
            {
                SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height, i);
            }
            // CopyDate() presumably draws the custom overlay into pFrameRGB — TODO confirm.
            CopyDate(pFrameRGB, pCodecCtx->width, pCodecCtx->height, packet.pts - prepts);
            // NOTE(review): this is the failing call. pCodecCtx was opened as a
            // DECODER, so this returns AVERROR(ENOSYS) (-40 on Windows), and
            // pFrameRGB is RGB24, which an H.264 encoder would reject anyway.
            // A dedicated encoder context + YUV420P frame is required here.
            ret =avcodec_encode_video2(pCodecCtx, &packet, pFrameRGB, &frameFinished);
            if (ret < 0)
            {
                AfxMessageBox(L"Encoding失敗");
            }

            prepts = packet.pts;
        }



        //==============================================================================================
        // Pace the stream in real time: sleep until this packet's dts is due.
        AVRational time_base = i_pFormatCtx->streams[videoindex]->time_base;
        AVRational time_base_q = { 1,AV_TIME_BASE };
        int64_t pts_time = av_rescale_q(packet.dts, time_base, time_base_q);
        int64_t now_time = av_gettime() - start_time;
        if (pts_time > now_time)
        {
            av_usleep(pts_time - now_time);
        }   
    }
    //Simple Write PTS
    // NOTE(review): this fabricates timestamps using the VIDEO stream's
    // time_base/frame rate even when the packet belongs to another stream —
    // likely wrong for audio packets; verify.
    if (packet.pts == AV_NOPTS_VALUE)
    {
        //Write PTS
        AVRational time_base1 = i_pFormatCtx->streams[videoindex]->time_base;
        //Duration between 2 frames (us)
        int64_t calc_duration = (double)AV_TIME_BASE / av_q2d(i_pFormatCtx->streams[videoindex]->r_frame_rate);
        //Parameters
        packet.pts = (double)(frame_index * calc_duration) / (double)(av_q2d(time_base1) * AV_TIME_BASE);
        packet.dts = packet.pts;
        packet.duration = (double)calc_duration / (double)(av_q2d(time_base1) * AV_TIME_BASE);
    }

    in_stream = i_pFormatCtx->streams[packet.stream_index];
    out_stream = out_pFormatCtx->streams[packet.stream_index];
    /* copy packet */
    //Convert PTS/DTS
    packet.pts = av_rescale_q_rnd(packet.pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
    packet.dts = av_rescale_q_rnd(packet.dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
    packet.duration = av_rescale_q(packet.duration, in_stream->time_base, out_stream->time_base);
    packet.pos = -1;
    //Print to Screen
    if (packet.stream_index == videoindex)
    {
        //printf("Send %8d video frames to output URL\n", frame_index);
        frame_index++;
        //ret = av_interleaved_write_frame(out_pFormatCtx, &packet);
    }
    //ret = av_write_frame(ofmt_ctx, &pkt);
    ret = av_interleaved_write_frame(out_pFormatCtx, &packet);

    if (ret < 0)
    {
        //printf("Error muxing packet\n");
        break;
    }
    // av_free_packet is deprecated; av_packet_unref is the modern equivalent.
    av_free_packet(&packet);
}
//Write file trailer
av_write_trailer(out_pFormatCtx);

// goto-based cleanup: close input, then output I/O and contexts.
end:
AfxMessageBox(L"Stream is closed");
avformat_close_input(&i_pFormatCtx);
/* close output */
if (out_pFormatCtx && !(ofmt->flags & AVFMT_NOFILE))
avio_close(out_pFormatCtx->pb);
avformat_free_context(out_pFormatCtx);
if (ret < 0 && ret != AVERROR_EOF)
{
//printf("Error occurred.\n");
return ;
}
return;

  • 写回答

1条回答

  • CSDN-Ada助手 CSDN-AI 官方账号 2022-09-20 19:31
    关注
    不知道你这个问题是否已经解决, 如果还没有解决的话:

    如果你已经解决了该问题, 非常希望你能够分享一下解决方案, 写成博客, 将相关链接放在评论区, 以帮助更多的人 ^-^
    评论

报告相同问题?

悬赏问题

  • ¥30 这是哪个作者做的宝宝起名网站
  • ¥60 版本过低apk如何修改可以兼容新的安卓系统
  • ¥25 由IPR导致的DRIVER_POWER_STATE_FAILURE蓝屏
  • ¥50 有数据,怎么建立模型求影响全要素生产率的因素
  • ¥50 有数据,怎么用matlab求全要素生产率
  • ¥15 TI的insta-spin例程
  • ¥15 完成下列问题完成下列问题
  • ¥15 C#算法问题, 不知道怎么处理这个数据的转换
  • ¥15 YoloV5 第三方库的版本对照问题
  • ¥15 请完成下列相关问题!