On a Linux system with Qt, how do I record video through the FFmpeg 3.x API?
I already have it working from the command line.
But I can't port the ffmpeg executable to my development board, so the API is my only option.
Could anyone point me in the right direction?
Recording from the camera is all I need.
You can record camera video on Linux through the FFmpeg API. Here is a concrete solution.

On a desktop build host, first install the FFmpeg development packages (the plain ffmpeg package only ships the command-line tool, not the headers and libraries you need to link against):
sudo apt-get install libavformat-dev libavcodec-dev libavutil-dev libswscale-dev
Then link the FFmpeg libraries in your Qt .pro file:
LIBS += -lavformat -lavcodec -lavutil -lswscale
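Since the ffmpeg binary cannot be deployed to the board, the libraries themselves have to be cross-compiled for the target and the .pro file pointed at them. As a rough sketch only (the toolchain prefix arm-linux-gnueabihf- and the install prefix /opt/ffmpeg-arm are assumptions; substitute the values from your board's SDK):

./configure --enable-cross-compile --cross-prefix=arm-linux-gnueabihf- \
  --arch=arm --target-os=linux --prefix=/opt/ffmpeg-arm \
  --disable-programs --disable-doc --enable-gpl --enable-libx264
make && make install

Note that --enable-libx264 requires a libx264 cross-compiled for the same target; without it FFmpeg has no H.264 encoder and avcodec_find_encoder(AV_CODEC_ID_H264) below will return NULL. In the .pro file, add INCLUDEPATH += /opt/ffmpeg-arm/include and LIBS += -L/opt/ffmpeg-arm/lib in front of the -l flags above.

The complete capture-and-encode example: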
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <time.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/videodev2.h>
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
#include <libavutil/opt.h>
#define CLEAR(x) memset(&(x), 0, sizeof(x))
const char *dev_name = "/dev/video0";
const char *file_name = "test.mp4";
int fps = 25;
int image_width = 640;
int image_height = 480;
int video_bitrate = 1000000;
AVFormatContext *av_ctx;
AVOutputFormat *av_fmt;
AVStream *out_stream;
AVCodec *av_codec;
AVCodecContext *av_codec_ctx;
AVFrame *av_frame;                 /* YUV420P frame handed to the encoder */
AVPacket av_packet;
struct SwsContext *sws_ctx;        /* YUYV -> YUV420P converter, created once */
struct buffer {                    /* mmap'ed V4L2 capture buffers */
    void *start;
    size_t length;
} buffers[4];
struct timeval tv_now, tv_prev;
double time_diff;
int64_t frame_count = 0;           /* drives the encoder pts */
int init_device(int fd)
{
    struct v4l2_capability cap;
    struct v4l2_format fmt;
    struct v4l2_requestbuffers req;

    CLEAR(cap);
    CLEAR(fmt);
    CLEAR(req);

    if (ioctl(fd, VIDIOC_QUERYCAP, &cap) < 0) {
        perror("VIDIOC_QUERYCAP");
        return -1;
    }
    if (!(cap.capabilities & V4L2_CAP_VIDEO_CAPTURE)) {
        fprintf(stderr, "The device does not support capturing.\n");
        return -1;
    }
    if (!(cap.capabilities & V4L2_CAP_STREAMING)) {
        fprintf(stderr, "The device does not support streaming i/o.\n");
        return -1;
    }

    fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    fmt.fmt.pix.width = image_width;
    fmt.fmt.pix.height = image_height;
    fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
    fmt.fmt.pix.field = V4L2_FIELD_INTERLACED;
    if (ioctl(fd, VIDIOC_S_FMT, &fmt) < 0) {
        perror("VIDIOC_S_FMT");
        return -1;
    }
    /* the driver may silently substitute another format; the converter below assumes YUYV */
    if (fmt.fmt.pix.pixelformat != V4L2_PIX_FMT_YUYV) {
        fprintf(stderr, "The device does not deliver YUYV.\n");
        return -1;
    }

    /* ask for four mmap-able capture buffers */
    req.count = 4;
    req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    req.memory = V4L2_MEMORY_MMAP;
    if (ioctl(fd, VIDIOC_REQBUFS, &req) < 0) {
        perror("VIDIOC_REQBUFS");
        return -1;
    }
    return 0;
}
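If VIDIOC_S_FMT fails, or the YUYV check above trips, it helps to see what the camera actually offers. A small diagnostic sketch (a hypothetical helper, not part of the recorder; it reuses only the V4L2 ioctls already included):

/* print every pixel format the capture device advertises */
void list_formats(int fd)
{
    struct v4l2_fmtdesc desc;
    CLEAR(desc);
    desc.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    while (ioctl(fd, VIDIOC_ENUM_FMT, &desc) == 0) {
        printf("format %u: %s\n", desc.index, (const char *)desc.description);
        desc.index++;
    }
}

Call it once after open() while debugging; many cheap UVC cameras only offer YUYV at low resolutions and MJPEG at higher ones.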
int init_mmap(int fd)
{
    int i;
    struct v4l2_buffer buf;
    enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

    for (i = 0; i < 4; ++i) {
        CLEAR(buf);
        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = V4L2_MEMORY_MMAP;
        buf.index = i;
        if (ioctl(fd, VIDIOC_QUERYBUF, &buf) < 0) {
            perror("VIDIOC_QUERYBUF");
            return -1;
        }
        /* map the driver's buffer into our address space */
        buffers[i].length = buf.length;
        buffers[i].start = mmap(NULL, buf.length, PROT_READ | PROT_WRITE,
                                MAP_SHARED, fd, buf.m.offset);
        if (buffers[i].start == MAP_FAILED) {
            perror("mmap");
            return -1;
        }
        /* hand the empty buffer to the driver so it can fill it */
        if (ioctl(fd, VIDIOC_QBUF, &buf) < 0) {
            perror("VIDIOC_QBUF");
            return -1;
        }
    }
    /* start capturing */
    if (ioctl(fd, VIDIOC_STREAMON, &type) < 0) {
        perror("VIDIOC_STREAMON");
        return -1;
    }
    return 0;
}
int init_ffmpeg(const char *file_name)
{
    av_register_all();    /* still required on FFmpeg 3.x */

    av_fmt = av_guess_format(NULL, file_name, NULL);
    av_ctx = avformat_alloc_context();
    av_ctx->oformat = av_fmt;
    if (avio_open(&av_ctx->pb, file_name, AVIO_FLAG_WRITE) < 0) {
        fprintf(stderr, "avio_open failed\n");
        return -1;
    }

    /* needs an FFmpeg build with libx264 (--enable-gpl --enable-libx264) */
    av_codec = avcodec_find_encoder(AV_CODEC_ID_H264);
    if (!av_codec) {
        fprintf(stderr, "H.264 encoder not found\n");
        return -1;
    }

    out_stream = avformat_new_stream(av_ctx, NULL);
    av_codec_ctx = avcodec_alloc_context3(av_codec);
    av_codec_ctx->codec_id = AV_CODEC_ID_H264;
    av_codec_ctx->codec_type = AVMEDIA_TYPE_VIDEO;
    av_codec_ctx->width = image_width;
    av_codec_ctx->height = image_height;
    av_codec_ctx->bit_rate = video_bitrate;
    av_codec_ctx->time_base = (AVRational){1, fps};
    av_codec_ctx->gop_size = fps;
    av_codec_ctx->qmin = 10;
    av_codec_ctx->qmax = 51;
    av_codec_ctx->max_b_frames = 0;
    av_codec_ctx->pix_fmt = AV_PIX_FMT_YUV420P;   /* what libx264 expects */
    if (av_ctx->oformat->flags & AVFMT_GLOBALHEADER)
        av_codec_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

    if (avcodec_open2(av_codec_ctx, av_codec, NULL) < 0) {
        fprintf(stderr, "avcodec_open2 failed\n");
        return -1;
    }
    /* copy the codec settings into the stream (codecpar API, FFmpeg 3.1+) */
    avcodec_parameters_from_context(out_stream->codecpar, av_codec_ctx);
    out_stream->time_base = av_codec_ctx->time_base;

    av_dump_format(av_ctx, 0, file_name, 1);

    /* the YUV420P frame the encoder consumes */
    av_frame = av_frame_alloc();
    av_frame->format = av_codec_ctx->pix_fmt;
    av_frame->width = av_codec_ctx->width;
    av_frame->height = av_codec_ctx->height;
    av_frame_get_buffer(av_frame, 32);

    /* one reusable converter: packed YUYV from the camera -> planar YUV420P */
    sws_ctx = sws_getContext(image_width, image_height, AV_PIX_FMT_YUYV422,
                             image_width, image_height, AV_PIX_FMT_YUV420P,
                             SWS_FAST_BILINEAR, NULL, NULL, NULL);

    av_init_packet(&av_packet);
    av_packet.data = NULL;    /* let the encoder allocate the packet data */
    av_packet.size = 0;

    if (avformat_write_header(av_ctx, NULL) < 0) {
        fprintf(stderr, "avformat_write_header failed\n");
        return -1;
    }
    return 0;
}
int main(int argc, char *argv[])
{
    int fd;

    /* O_NONBLOCK so VIDIOC_DQBUF returns EAGAIN instead of blocking */
    fd = open(dev_name, O_RDWR | O_NONBLOCK, 0);
    if (fd < 0) {
        fprintf(stderr, "Cannot open '%s': %d, %s\n",
                dev_name, errno, strerror(errno));
        return -1;
    }
    if (init_device(fd) < 0)
        exit(-1);
    if (init_mmap(fd) < 0)
        exit(-1);
    if (init_ffmpeg(file_name) < 0)
        exit(-1);

    gettimeofday(&tv_prev, NULL);
    /* record a fixed number of frames (10 s here); a while(1) would never
     * reach the cleanup code below */
    while (frame_count < (int64_t)fps * 10) {
        gettimeofday(&tv_now, NULL);
        time_diff = (tv_now.tv_sec - tv_prev.tv_sec) + (tv_now.tv_usec - tv_prev.tv_usec) / 1000000.0;
        if (time_diff < 1.0 / fps)
            continue;
        tv_prev = tv_now;

        /* dequeue a filled YUYV buffer from the driver */
        struct v4l2_buffer buf;
        CLEAR(buf);
        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf.memory = V4L2_MEMORY_MMAP;
        if (ioctl(fd, VIDIOC_DQBUF, &buf) < 0) {
            if (errno == EAGAIN)
                continue;          /* no frame ready yet */
            perror("VIDIOC_DQBUF");
            break;
        }

        /* packed YUYV -> planar YUV420P, straight into the encoder frame */
        const uint8_t *src[1] = { buffers[buf.index].start };
        const int src_stride[1] = { image_width * 2 };
        sws_scale(sws_ctx, src, src_stride, 0, image_height,
                  av_frame->data, av_frame->linesize);
        av_frame->pts = frame_count++;

        /* give the buffer back to the driver for refilling */
        if (ioctl(fd, VIDIOC_QBUF, &buf) < 0) {
            perror("VIDIOC_QBUF");
            break;
        }

        /* encode one frame and write the resulting packet, if any
         * (FFmpeg 3.x API; on 4.x+ use avcodec_send_frame/avcodec_receive_packet) */
        int got_packet = 0;
        if (avcodec_encode_video2(av_codec_ctx, &av_packet, av_frame, &got_packet) < 0) {
            fprintf(stderr, "avcodec_encode_video2 failed\n");
            break;
        }
        if (got_packet) {
            av_packet_rescale_ts(&av_packet, av_codec_ctx->time_base, out_stream->time_base);
            av_packet.stream_index = out_stream->index;
            if (av_interleaved_write_frame(av_ctx, &av_packet) < 0) {
                fprintf(stderr, "av_interleaved_write_frame failed\n");
                break;
            }
            av_packet_unref(&av_packet);
        }
    }

    /* drain the encoder's delayed packets here; see the flush sketch below */
    av_write_trailer(av_ctx);      /* required, or the MP4 index is never written */
    avcodec_close(av_codec_ctx);
    avio_closep(&av_ctx->pb);
    avformat_free_context(av_ctx);
    close(fd);
    return 0;
}
In the code above, the while loop in main keeps dequeuing camera frames, encodes them with FFmpeg, and writes them into the output file. sws_ctx is the libswscale context used for pixel-format conversion: sws_scale turns the camera's packed YUYV data into planar YUV420P, which is the pixel format the libx264 H.264 encoder expects. avcodec_encode_video2 then produces the compressed packets that av_interleaved_write_frame muxes into the MP4.
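One detail the loop glosses over: libx264 buffers frames internally (lookahead), so a few packets are usually still inside the encoder when the loop ends, and they are lost unless you drain them by passing NULL frames before av_write_trailer. A minimal flush sketch against the same globals (flush_encoder is a name introduced here; call it in main just before av_write_trailer):

/* drain delayed packets out of the encoder before writing the trailer */
void flush_encoder(void)
{
    int got_packet = 1;
    while (got_packet) {
        /* a NULL frame is the flush request on FFmpeg 3.x */
        if (avcodec_encode_video2(av_codec_ctx, &av_packet, NULL, &got_packet) < 0)
            break;
        if (got_packet) {
            av_packet_rescale_ts(&av_packet, av_codec_ctx->time_base, out_stream->time_base);
            av_packet.stream_index = out_stream->index;
            av_interleaved_write_frame(av_ctx, &av_packet);
            av_packet_unref(&av_packet);
        }
    }
}

On FFmpeg 4 and later, the same drain is done with avcodec_send_frame(ctx, NULL) followed by avcodec_receive_packet in a loop.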