问题遇到的现象和发生背景
在做ffmpeg h264 aac封装mp4文件时,发现网上绝大部分都是通过读取文件来获取流数据及信息的,因为我是通过摄像头获取音视频流的,相关的流格式配置要手动配置,这就不是随便cv网上的代码就能搞定的了。通过大半个月的努力,集合了网上的代码,出了这一份音视频并不同步的代码。(大家不要随便复制粘贴,我也是0开始做音视频的)
问题相关代码,请勿粘贴截图
这部分是ffmpeg的实现
int getVopType( const void *p, int len )
{
if ( !p || 6 >= len )
return -1;
unsigned char *b = (unsigned char*)p;
// Verify NAL marker
if ( b[ 0 ] || b[ 1 ] || 0x01 != b[ 2 ] )
{ b++;
if ( b[ 0 ] || b[ 1 ] || 0x01 != b[ 2 ] )
return -1;
} // end if
b += 3;
// Verify VOP id
if ( 0xb6 == *b )
{
b++;
return ( *b & 0xc0 ) >> 6;
} // end if
switch( *b )
{
case 0x65 : return 0;
case 0x61 : return 1;
case 0x01 : return 2;
} // end switch
return -1;
}
int isIdrFrame2(uint8_t* buf, int len){
switch (buf[0] & 0x1f){
case 7: // SPS
return true;
case 8: // PPS
return true;
case 5:
return true;
case 1:
return false;
default:
return false;
break;
}
return false;
}
int isIdrFrame1(uint8_t* buf, int size){
//主要是解析idr前面的sps pps
int last = 0;
for (int i = 2; i <= size; ++i){
if (i == size) {
if (last) {
int ret = isIdrFrame2(buf+last ,i - last);
if (ret) {
//found = true;
return true;
}
}
} else if (buf[i - 2]== 0x00 && buf[i - 1]== 0x00 && buf[i] == 0x01) {
if (last) {
int size = i - last - 3;
if (buf[i - 3]) ++size;
int ret = isIdrFrame2(buf+last ,size);
if (ret) {
//found = true;
return true;
}
}
last = i + 1;
}
}
return false;
}
void make_dsi( unsigned int sampling_frequency_index, unsigned int channel_configuration, unsigned char* dsi )
{
unsigned int object_type = 2; // AAC LC by default
dsi[0] = (object_type<<3) | (sampling_frequency_index>>1);
dsi[1] = ((sampling_frequency_index&1)<<7) | (channel_configuration<<3);
}
int get_sr_index(unsigned int sampling_frequency)
{
switch (sampling_frequency) {
case 96000: return 0;
case 88200: return 1;
case 64000: return 2;
case 48000: return 3;
case 44100: return 4;
case 32000: return 5;
case 24000: return 6;
case 22050: return 7;
case 16000: return 8;
case 12000: return 9;
case 11025: return 10;
case 8000: return 11;
case 7350: return 12;
default: return 0;
}
}
static void SeekFrame(AVFormatContext* context, unsigned int seektime_sec)//跳转到指定位置
{
av_seek_frame(context, -1, seektime_sec * AV_TIME_BASE, AVSEEK_FLAG_BACKWARD);
}
/* Add an output stream */
AVStream *add_stream(AVFormatContext *oc, AVCodec **codec, enum AVCodecID codec_id)
{
AVCodecContext *c;
AVStream *st;
char dsi[2];
/* find the encoder */
*codec = avcodec_find_encoder(codec_id);
if (!*codec)
{
printf("could not find encoder for '%s' \n", avcodec_get_name(codec_id));
return NULL;
}
st = avformat_new_stream(oc, *codec);
if (!st)
{
printf("could not allocate stream \n");
return NULL;
}
printf("codec name:%s\n",(*codec)->name);
st->id = oc->nb_streams-1;
c = st->codec;
vi = st->index;
switch ((*codec)->type)
{
case AVMEDIA_TYPE_AUDIO:
printf("AVMEDIA_TYPE_AUDIO\n");
c->sample_fmt = (*codec)->sample_fmts ? (*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
c->codec_id = AV_CODEC_ID_AAC;
c->bit_rate = 64000;
c->sample_rate = SAMPLE_RATE;
c->channels = 1;
c->time_base.num = 1024;
c->time_base.den = c->sample_rate;
c->strict_std_compliance = -2;
make_dsi((unsigned int)get_sr_index(16000),1,dsi);
c->extradata = (uint8_t*)dsi;
c->extradata_size = 2;
break;
case AVMEDIA_TYPE_VIDEO:
printf("AVMEDIA_TYPE_VIDEO\n");
c->codec_id = AV_CODEC_ID_H264;
c->bit_rate = 1048576;
c->width = 1920;
c->height = 1080;
c->time_base.den = STREAM_FRAME_RATE;
c->time_base.num = 1;
c->gop_size = 1;
c->pix_fmt = AV_PIX_FMT_YUV420P;
if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO)
{
c->max_b_frames = 2;
}
if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO)
{
c->mb_decision = 2;
}
break;
default:
break;
}
//MP4格式需要全局头信息,AVI不需要
m_bNeedGlobalHeader = (oc->oformat->flags & AVFMT_GLOBALHEADER);
if(m_bNeedGlobalHeader)
{
c->flags |= CODEC_FLAG_GLOBAL_HEADER;
//extradata空间大小只需能装下SPS,PPS,SEI等即可
c->extradata = (uint8_t*)av_malloc(32);
c->extradata_size = 0;
m_bHadFillGlobalHeader = false;
}
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
{
c->flags |= CODEC_FLAG_GLOBAL_HEADER;
}
return st;
}
void open_VideoAudio(AVFormatContext *oc, AVCodec *codec, AVStream *st)
{
int ret;
AVCodecContext *c = st->codec;
/* open the codec */
ret = avcodec_open2(c, codec, NULL);
if (ret < 0)
{
MLOGE("could not open video codec\n");
//exit(1);
}
}
int CreateMp4()
{
ptsInc = 0,frameindex = 0;
int ret = 0; // 成功返回0,失败返回-1
char filepath[273] = "";
AVOutputFormat *fmt;
AVCodec *video_codec;
AVCodec *audio_codec;
/* create mp4 file */
time_t now= time(NULL);
struct tm *tm = localtime(&now);
snprintf(filepath, sizeof(filepath), "%s%s%04d-%02d%02d-%02d%02d%02d.%s", FILE_ROOT,FILENAME_PREFIX,
tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec,
"mp4");
avformat_alloc_output_context2(&m_pOc, NULL, NULL, filepath);
if (!m_pOc)
{
MLOGE("Could not deduce output format from file extension: using MPEG. \n");
avformat_alloc_output_context2(&m_pOc, NULL, NULL, filepath);
}
if (!m_pOc)
{
ret = -1;
goto QUIT;
}
fmt = m_pOc->oformat;
MLOGD("video codec:%d audio codec:%d\n",fmt->video_codec,fmt->audio_codec);
if (fmt->video_codec != AV_CODEC_ID_NONE && fmt->audio_codec != AV_CODEC_ID_NONE)
{
m_pVideoSt = add_stream(m_pOc, &video_codec, fmt->video_codec);
m_pAudioSt = add_stream(m_pOc, &audio_codec, fmt->audio_codec);
}else{
MLOGE(RED"format ID NONE,add stream fail!\n");
}
if (m_pVideoSt && m_pAudioSt)
{
MLOGD("open video and audio\n");
}
printf("==========Output Information==========\n");
av_dump_format(m_pOc, 0, filepath, 1);
printf("======================================\n");
/* open the output file, if needed */
if (!(fmt->flags & AVFMT_NOFILE))
{
ret = avio_open(&m_pOc->pb, filepath, AVIO_FLAG_WRITE);
if (ret < 0)
{
MLOGE("could not open %s\n", filepath);
ret = -1;
goto QUIT;
}
}
/* Write the stream header, if any */
ret = avformat_write_header(m_pOc, NULL);
if (ret < 0)
{
MLOGE("Error occurred when opening output file");
ret = -1;
goto QUIT;
}
QUIT:
return ret;
}
/* write h264 data to mp4 file
keyframe < 0,为音频帧 faac
* 创建mp4文件返回2;写入数据帧返回0
*
* Video
* av_q2d(st->time_base): 秒/格
* av_rescale_q(int64_t a, AVRational bq, AVRational cq)计算a*bq / cq来把时间戳从一个时间基调整到另外一个时间基,表示在bq下的占a个格子,在cq下是多少
*
*
*Audio
* 音频sample_rate:samples per second,即采样率,表示每秒采集多少采样点。
比如44100HZ,就是一秒采集44100个sample.
即每个sample的时间是1/44100秒
* 一个音频帧的AVFrame有nb_samples个sample,所以一个AVFrame耗时是nb_samples乘以(1/44100)秒,即标准时间下duration_s=nb_samples乘以(1/44100)秒,即标准时间下duration_s=nb_samples乘以(1/44100)秒,
* 转换成AVStream时间基下
duration=duration_s / av_q2d(st->time_base)
基于st->time_base的num值一般等于采样率,所以duration=nb_samples.
pts=n* duration=n *nb_samples*/
int WriteVideoAudio(uint8_t* data, int nLen,unsigned long int timestamp,const int keyframe)
{
int ret;
int64_t calc_duration;
AVPacket pkt;
av_init_packet( &pkt );
// AVStream *pst = m_pOc->streams[ vi ];
if(keyframe >= 0)//轮到视频写入
{
// 我的添加,为了计算pts
AVCodecContext *c = m_pVideoSt->codec;
int isI = keyframe;
pkt.flags |= isI ? AV_PKT_FLAG_KEY : 0;
pkt.stream_index = m_pVideoSt->index;
pkt.data = data;
pkt.size = nLen;
// Wait for key frame
if ( waitkey ){
if ( 0 == ( pkt.flags & AV_PKT_FLAG_KEY ) ){
ret = -1;
goto EXIT;
}
else
waitkey = 0;
}
if(isI && !m_bHadFillGlobalHeader)
{
memcpy(m_pVideoSt->codec->extradata + m_pVideoSt->codec->extradata_size, pkt.data, 32);
m_pVideoSt->codec->extradata_size += 32;
m_bHadFillGlobalHeader = 1;
pkt.data += 32;
}
//way 1
#if 1
pkt.pts = av_rescale_q((frameindex++), m_pVideoSt->codec->time_base,m_pVideoSt->time_base);//不同时间基的转换
pkt.dts = pkt.pts;
pkt.duration = av_rescale_q(pkt.duration,m_pVideoSt->time_base, m_pVideoSt->time_base);
pkt.pos = -1;
cur_pts_v=pkt.pts;
#elif
//av_q2d 转换后的单位是s
//way 2
//Duration between 2 frames (us)
calc_duration=(double)AV_TIME_BASE/av_q2d(m_pVideoSt->codec->time_base);//内部时间 每帧间隔
//Parameters
pkt.pts=(double)(frameindex*calc_duration)/((double)(av_q2d(m_pVideoSt->codec->time_base)*AV_TIME_BASE));
pkt.dts=pkt.pts;
pkt.duration=(double)calc_duration/((double)(av_q2d(m_pVideoSt->codec->time_base)*AV_TIME_BASE));
frameindex++;
cur_pts_v=pkt.pts;
#endif
pkt.pts = av_rescale_q(pkt.pts,m_pVideoSt->codec->time_base,m_pAudioSt->codec->time_base);//转成视频时间基
pkt.dts=pkt.pts;
pkt.duration=av_rescale_q(pkt.duration,m_pVideoSt->codec->time_base,m_pAudioSt->codec->time_base);
}
else{//轮到音频写入
pkt.stream_index = m_pAudioSt->index;
pkt.data = data;
pkt.size = nLen;
//way 1
#if 1
pkt.pts = av_rescale_q(Audioframeindex++, m_pAudioSt->codec->time_base,m_pAudioSt->time_base);//不同时间基的转换
pkt.dts = pkt.pts;
pkt.duration = av_rescale_q(pkt.duration,m_pAudioSt->time_base, m_pAudioSt->time_base);
pkt.pos = -1;
cur_pts_a=pkt.pts;
#elif
//way 2
//Write PTS
AVRational time_base1=m_pAudioSt->codec->time_base;
//Duration between 2 frames (us)
calc_duration=(double)AV_TIME_BASE/av_q2d(time_base1);
//Parameters
pkt.pts=(double)(frameindex*calc_duration)/(double)(av_q2d(time_base1)*AV_TIME_BASE);
pkt.dts=pkt.pts;
pkt.duration=(double)calc_duration/(double)(av_q2d(time_base1)*AV_TIME_BASE);
// pkt.duration=av_rescale_q(pkt.duration,m_pAudioSt->codec->time_base,m_pVideoSt->codec->time_base);
// av_bitstream_filter_filter(aacBitstreamFilterContext, m_pAudioSt->codec, NULL, &pkt.data, &pkt.size, pkt.data, pkt.size, 0);
#endif
cur_pts_a=pkt.pts;
}
ret = av_interleaved_write_frame( m_pOc, &pkt );
if (ret < 0)
{
MLOGE("cannot write frame");
ret = -1;
}
av_free_packet(&pkt);
EXIT:
return ret;
}
void CloseMp4()
{
waitkey = -1;
vi = -1;
int ret = 0;
m_pVideoSt = NULL;
m_pAudioSt = NULL;
cur_pts_v=0,cur_pts_a=0,Audioframeindex=0,frameindex = 0;
if (m_pOc){
av_write_trailer(m_pOc);
}
if (m_pOc && !(m_pOc->oformat->flags & AVFMT_NOFILE)){
avio_close(m_pOc->pb);
}
if (m_pOc)
{
avformat_free_context(m_pOc);
m_pOc = NULL;
}
}
void ffmpegMp4_init(){
av_register_all();
avcodec_register_all();
}
int ffmpegMp4_compare_ts(){
return av_compare_ts(cur_pts_v,m_pVideoSt->codec->time_base,cur_pts_a,m_pAudioSt->codec->time_base);
}
/*success :return filename
* fail: return NULL*/
unsigned char *ffmpegMp4_Openinputfile(char *in_filename,AVFormatContext *out_ifmt_ctx,int stream_v,int stream_a)
{
if(Readframe_ifmt_ctx != NULL){
printf("AVFormatContext need uninit!!!!\n\n");
return NULL;
}
int ret, i;
char buf[1024] = {0};
AVCodec *video_codec;
AVCodec *audio_codec;
MLOGD("\n");
//Input
if ((ret = avformat_open_input(&Readframe_ifmt_ctx, in_filename, 0, 0)) < 0) {
av_strerror(ret,buf,1024);
MLOGE(RED"avformat_open_input:%s %s\n",in_filename,buf);
return NULL;
}
MLOGD("\n");
Readframe_videoindex=-1;Readframe_audioindex=-1;
for(i=0; i<Readframe_ifmt_ctx->nb_streams; i++) {
if(Readframe_ifmt_ctx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO){
Readframe_videoindex=i;
}else if(Readframe_ifmt_ctx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO){
Readframe_audioindex=i;
}
}
MLOGD("videoindex:%d audioindex:%d\n",Readframe_videoindex,Readframe_audioindex);
//Dump Format------------------
printf("\nInput Video===========================\n");
av_dump_format(Readframe_ifmt_ctx, 0, in_filename, 0);
printf("\n======================================\n");
#if USE_H264BSF
Readframe_h264bsfc = av_bitstream_filter_init("h264_mp4toannexb");
#endif
memcpy(out_ifmt_ctx,Readframe_ifmt_ctx,sizeof(AVFormatContext));
stream_v = Readframe_videoindex;
stream_a = Readframe_audioindex;
return Readframe_ifmt_ctx->filename;
}
/*
FIX: H.264 in some container format (FLV, MP4, MKV etc.) need
"h264_mp4toannexb" bitstream filter (BSF)
*Add SPS,PPS in front of IDR frame
*Add start code ("0,0,0,1") in front of NALU
H.264 in some container (MPEG2TS) don't need this BSF.
*/
int ffmpegMp4_Readframe(unsigned char *video_data,int *video_size,int *isKeyframe,unsigned long int *timestamp,unsigned char * audio_data,int *audio_size){
int ret;
if(Readframe_ifmt_ctx == NULL){
MLOGE("Mp4 had not open!\n");
return -1;
}
av_init_packet( &Readframe_pkt );
if(av_read_frame(Readframe_ifmt_ctx, &Readframe_pkt)>=0){
if(Readframe_pkt.stream_index==Readframe_videoindex){
*timestamp = Readframe_pkt.pts * av_q2d(Readframe_ifmt_ctx->streams[Readframe_pkt.stream_index]->time_base) * 1000;
#if USE_H264BSF
uint8_t* outbuf = NULL;
int outlen = 0;
int ret = av_bitstream_filter_filter(Readframe_h264bsfc, Readframe_ifmt_ctx->streams[Readframe_videoindex]->codec, NULL, &outbuf, &outlen, Readframe_pkt.data, Readframe_pkt.size, 0);
if(ret < 0){
char buf[1024];
av_strerror(ret,buf,1024);
MLOGE(RED"error: %s\n",buf);
}else if(ret == 0){
MLOGE("buffer is not allocated\n");
}else{
/*handle read video frame*/
memcpy(video_data,outbuf,outlen);
*video_size = outlen;
*isKeyframe = Readframe_pkt.flags;
av_free(outbuf);
}
#else
/*handle read video frame*/
memcpy(video_data,outbuf,outlen);
*video_size = outlen;
*isKeyframe = Readframe_pkt.flags;
#endif
}else if(Readframe_pkt.stream_index==Readframe_audioindex){
/*
AAC in some container format (FLV, MP4, MKV etc.) need to add 7 Bytes
ADTS Header in front of AVPacket data manually.
Other Audio Codec (MP3...) works well.
*/
*timestamp = Readframe_pkt.pts * av_q2d(Readframe_ifmt_ctx->streams[Readframe_pkt.stream_index]->time_base) * 1000;
/*handle read Audio frame*/
memcpy(audio_data,Readframe_pkt.data,Readframe_pkt.size);
*audio_size = Readframe_pkt.size;
*isKeyframe = -1;
}
av_free_packet(&Readframe_pkt);
}else{
av_free_packet(&Readframe_pkt);
return ffmpegMp4_StopReadframe();
}
return 0;
}
int ffmpegMp4_StopReadframe(){
if(Readframe_ifmt_ctx == NULL)
return STOPLOOP;
int ret;
#if USE_H264BSF
av_bitstream_filter_close(Readframe_h264bsfc);
#endif
avformat_close_input(&Readframe_ifmt_ctx);
Readframe_ifmt_ctx = NULL;
printf("stop readframe done!\n");
return STOPLOOP;
}
int ffmpeg_readframetest(char *o_videofile,char *o_audiofile,char *readmp4file){
int ret = 0;
FILE * videofp = fopen(o_videofile,"wb");
if(videofp == NULL){
printf("open file fail\n\n");
return 0;
}
FILE * audiofp = fopen(o_audiofile,"wb");
if(audiofp == NULL){
printf("open file fail\n\n");
return 0;
}
// ffmpegMp4_Openinputfile(readmp4file);
#if USE_H264BSF
Readframe_h264bsfc = av_bitstream_filter_init("h264_mp4toannexb");
#endif
unsigned long int timestamp = 0;
while(1){
av_init_packet( &Readframe_pkt );
if(av_read_frame(Readframe_ifmt_ctx, &Readframe_pkt)>=0){
if(Readframe_pkt.stream_index==Readframe_videoindex){
timestamp = atoi(av_ts2str(Readframe_pkt.pts));
#if USE_H264BSF
av_bitstream_filter_filter(Readframe_h264bsfc, Readframe_ifmt_ctx->streams[Readframe_videoindex]->codec, NULL, &Readframe_pkt.data, &Readframe_pkt.size, Readframe_pkt.data, Readframe_pkt.size, 0);
#endif
printf("Video Packet. size:%d pts:%d timestamp:%ld duration:%lf timebase:%lf\n"
,Readframe_pkt.size,atoi(av_ts2str(Readframe_pkt.pts)),timestamp,(double)Readframe_pkt.duration,av_q2d(Readframe_ifmt_ctx->streams[Readframe_pkt.stream_index]->time_base));
/*handle read video frame*/
if(1 != (ret = fwrite(Readframe_pkt.data,Readframe_pkt.size,1,videofp))){
printf("fwrite len error:%d\n",ret);
}
}else if(Readframe_pkt.stream_index==Readframe_audioindex){
/*
AAC in some container format (FLV, MP4, MKV etc.) need to add 7 Bytes
ADTS Header in front of AVPacket data manually.
Other Audio Codec (MP3...) works well.
*/
printf("Audio Packet. size:%d pts:%d timestamp:%ld duration:%lf timebase:%lf\n"
,Readframe_pkt.size,atoi(av_ts2str(Readframe_pkt.pts)),timestamp,(double)Readframe_pkt.duration,av_q2d(Readframe_ifmt_ctx->streams[Readframe_pkt.stream_index]->time_base));
/*handle read Audio frame*/
if(1 != (ret = fwrite(Readframe_pkt.data,Readframe_pkt.size,1,audiofp))){
printf("fwrite len error:%d\n",ret);
}
}
av_free_packet(&Readframe_pkt);
}else
break;
}
printf("done!\n");
ffmpegMp4_StopReadframe();
return 0;
}
这部分是音视频数据写入
void * MainStreamVAlocalstorage(void *argv){
MLOGD(YELLOW"=============MainStreamVAlocalstorage server================\r\n");
/*video*/
MI_S32 s32Ret = MI_SUCCESS;
MI_VENC_Stream_t stStream;
MI_VENC_CHN VencChn = 0;//0:main_stream, 1:sub_stream0
MI_U32 i = 0,count = 0;
MI_VENC_ChnStat_t stStat;
MI_S32 s32Fd;
fd_set readFdSet;
int ret = 0;
struct timeval stTimeOut;
long long timestamp = 0;
HI_U32 sdstatus = 0;
int keyframe = 0;
int out_bufferlen = 0;
/*audio*/
MI_U32 u32AiDevId = 0;
MI_U32 u32AiChn = 0;
s32Ret = MI_AI_DupChn(u32AiDevId,u32AiChn); //RTOS
if(s32Ret != MI_SUCCESS)
{
MLOGE("MI_AI_DupChn Faild!Cannot catch Audio Input!Quuit Task!\r\n");
return NULL;
}
MI_AUDIO_DEV AiDevId = 0;
MI_AI_CHN AiChn = 0;
MI_AUDIO_Frame_t stAiChFrame;
struct timeval tv;
unsigned char out_faacbuffer[2048];
int j = 0,count_loop = 0;
/*ffmpeg create local file.mp4*/
CreateMp4();
/* get port fd */
s32Fd = MI_VENC_GetFd(VencChn);
if (s32Fd < 0)
{
MLOGE("VENC Chn%d failed to GetFd error:%d!!!\r\n", VencChn,s32Fd);
return NULL;
}
while(main_streamrun)
{
if((ret = ffmpegMp4_compare_ts()) <= 0)
{
/* video
* select 40ms */
FD_ZERO(&readFdSet);
FD_SET(s32Fd, &readFdSet);
stTimeOut.tv_sec = 0;
stTimeOut.tv_usec = 100 * 1000;
s32Ret = select(s32Fd + 1, &readFdSet, NULL, NULL, &stTimeOut);
if (s32Ret < 0)
{
MLOGE("select err\r\n");
continue;
}
else if (0 == s32Ret)
{
MLOGE("select timeout\r\n");
continue;
}
else
{
if (FD_ISSET(s32Fd, &readFdSet))
{
memset(&stStream, 0, sizeof(MI_VENC_Stream_t));
s32Ret = MI_VENC_Query(VencChn, &stStat);
if (s32Ret != MI_SUCCESS)
{
MLOGE("Get ChnStat Faild!\r\n");
continue;
}
if (0 == stStat.u32CurPacks || 0 == stStat.u32LeftStreamFrames)
{
MLOGE("ChnStat is Invalid!\r\n");
continue;
}
stStream.pstPack = (MI_VENC_Pack_t*)malloc(sizeof(MI_VENC_Pack_t)*stStat.u32CurPacks);
if(stStream.pstPack == NULL)
{
MLOGE("malloc stStream.pstPack Faild!\r\n");
continue;
}
stStream.u32PackCount = stStat.u32CurPacks;
s32Ret = MI_VENC_GetStream(VencChn, &stStream, 0);
if(MI_SUCCESS == s32Ret)
{
for (i = 0; i < stStream.u32PackCount; i ++)
{
/*local video storage*/
unsigned long int temp = cur_tv.tv_sec;
gettimeofday(&cur_tv, NULL);
timestamp = cur_tv.tv_sec + cur_tv.tv_usec;
if(stStream.pstPack[i].stDataType.eH264EType == E_MI_VENC_H264E_NALU_ISLICE)
keyframe = 1;
else
keyframe = 0;
WriteVideoAudio(stStream.pstPack[i].pu8Addr + stStream.pstPack[i].u32Offset
,stStream.pstPack[i].u32Len - stStream.pstPack[i].u32Offset
,timestamp
,keyframe);
if(!reloop_mainVideo){
memcpy(&start_tv,&cur_tv,sizeof(struct timeval));
reloop_mainVideo = 1;
}
/*loop record local video*/
if(cur_tv.tv_sec - start_tv.tv_sec >= VIDEO_DURATION){
reloop_mainVideo = 0;
CloseMp4();
CreateMp4();
}
}
s32Ret = MI_VENC_ReleaseStream(VencChn, &stStream);
if(MI_SUCCESS != s32Ret)
{
MLOGE("MI_VENC_ReleaseStream fail, ret:0x%x\n", s32Ret);
break;
}
}
else
{
free(stStream.pstPack);
MLOGE("MI_VENC_GetStream Faild,ret:[%x],Continue!!!\n", s32Ret);
}
}
}
}else{//audio
memset(&stAiChFrame, 0, sizeof(MI_AUDIO_Frame_t));
s32Ret = MI_AI_GetFrame(AiDevId, AiChn, &stAiChFrame, NULL, -1);
if (MI_SUCCESS == s32Ret)
{
/*local video storage*/
gettimeofday(&tv, NULL);
timestamp = tv.tv_sec * 1000000 + tv.tv_usec;
//conver to faac
out_bufferlen = Faac_encode(stAiChFrame.apSrcPcmVirAddr[0],stAiChFrame.u32SrcPcmLen[0],out_faacbuffer);
//aac
WriteVideoAudio(out_faacbuffer,out_bufferlen,timestamp,-1);
MI_AI_ReleaseFrame(AiDevId, AiChn, &stAiChFrame, NULL);
}
else
{
MLOGE("MI_AI_GetFrame failed:%02x! \n",s32Ret);
usleep(100*1000); continue;
}
}
}
EXIT
main_streamrun = HI_FALSE;
MLOGD("exit\n");
return 0;
}
运行结果及报错内容
结果是视频和音频逐渐不同步,1分钟的MP4文件,最后每次丢失了5秒的音频
我的解答思路和尝试过的方法
主要原因大概是在时间基的设置上,没什么头绪,有人可以给个建议?
我想要达到的结果
达到音视频同步即可