使用Javacv+ffmpeg推流之后再拉流查看画面十分模糊:
拉流之后进行检测看到的画面:
将这个检测之后的画面推流到流媒体服务器之后再拉流查看:
推流和检测代码如下:
package cn.ck;
// 其他需要的导入...
import ai.onnxruntime.OnnxTensor;
import ai.onnxruntime.OrtEnvironment;
import ai.onnxruntime.OrtException;
import ai.onnxruntime.OrtSession;
import cn.ck.config.ODConfig;
import cn.ck.domain.ODResult;
import cn.ck.utils.ImageUtil;
import cn.ck.utils.Letterbox;
import org.bytedeco.ffmpeg.global.avcodec;
import org.bytedeco.javacv.*;
import org.opencv.core.*;
import org.opencv.highgui.HighGui;
import org.opencv.imgproc.Imgproc;
import org.opencv.videoio.VideoCapture;
import org.opencv.videoio.Videoio;
import java.io.IOException;
import java.nio.FloatBuffer;
import java.util.HashMap;
/**
* 摄像头识别,这是yolov7的视频识别例子,v5和v8的根据下面的思路,将其他文件中的代码复制过来即可
*
* 视频帧率15最佳,20也可以,不建议30,分辨率640最佳,720也可以。不建议1080,码率不要超过2048,1024最佳
*/
public class CameraDetection {
// 视频帧率15最佳,20也可以,不建议30,分辨率640最佳,720也可以。不建议1080,码率不要超过2048,1024最佳 。可在摄像头自带的管理页面中设备,主码流和子码流
public static void main(String[] args) throws OrtException, IOException {
// System.load(ClassLoader.getSystemResource("lib/opencv_videoio_ffmpeg470_64.dll").getPath());
nu.pattern.OpenCV.loadLocally();
// System.loadLibrary("lib/opencv_videoio_ffmpeg4100_64.dll"); // 确保库名称与实际文件名匹配
//linux和苹果系统需要注释这一行\\如果仅打开摄像头预览,这一行没有用,可以删除,如果rtmp或者rtsp等等这一样有用,也可以用pom依赖代替
String OS = System.getProperty("os.name").toLowerCase();
if (OS.contains("win")) {
System.load(ClassLoader.getSystemResource("lib/opencv_videoio_ffmpeg470_64.dll").getPath());
}
String model_path = "D:\\project\\smart-education\\videoTest\\yolo-onnx-java-master\\yolo-onnx-java-master\\src\\main\\resources\\model\\yolov7-tiny.onnx";
// String model_path = "src\\main\\resources\\model\\yolov7-tiny.onnx";
String[] labels = {
"person", "bicycle", "car", "motorcycle", "airplane", "bus", "train",
"truck", "boat", "traffic light", "fire hydrant", "stop sign", "parking meter",
"bench", "bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear",
"zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase",
"frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat",
"baseball glove", "skateboard", "surfboard", "tennis racket", "bottle",
"wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple",
"sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut",
"cake", "chair", "couch", "potted plant", "bed", "dining table", "toilet",
"tv", "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave",
"oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors",
"teddy bear", "hair drier", "toothbrush"};
// 加载ONNX模型
OrtEnvironment environment = OrtEnvironment.getEnvironment();
OrtSession.SessionOptions sessionOptions = new OrtSession.SessionOptions();
// 使用gpu,需要本机按钻过cuda,并修改pom.xml,不安装也能运行本程序
// sessionOptions.addCUDA(0);
// 实际项目中,视频识别必须开启GPU,并且要防止队列堆积
OrtSession session = environment.createSession(model_path, sessionOptions);
// 输出基本信息
session.getInputInfo().keySet().forEach(x -> {
try {
System.out.println("input name = " + x);
System.out.println(session.getInputInfo().get(x).getInfo().toString());
} catch (OrtException e) {
throw new RuntimeException(e);
}
});
// 加载标签及颜色
ODConfig odConfig = new ODConfig();
VideoCapture video = new VideoCapture();
// 也可以设置为rtmp或者rtsp视频流:video.open("rtmp://192.168.1.100/live/test"), 海康,大华,乐橙,宇视,录像机等等
// video.open("rtsp://192.168.1.100/live/test")
// 也可以静态视频文件:video.open("video/car3.mp4"); flv 等
// 不持支h265视频编码,如果无法播放或者程序卡住,请修改视频编码格式
// video.open(0); //获取电脑上第0个摄像头
//拉流
video.open("http://192.168.137.7:9091/live_hls/a1.m3u8");
//video.open("images/car2.mp4"); //不开启gpu比较卡
//可以把识别后的视频在通过rtmp转发到其他流媒体服务器,就可以远程预览视频后视频,需要使用ffmpeg将连续图片合成flv 等等,很简单。
if (!video.isOpened()) {
System.err.println("打开视频流失败,未检测到监控,请先用vlc软件测试链接是否可以播放!,下面试用默认测试视频进行预览效果!");
video.open("video/car3.mp4");
}
// 在这里先定义下框的粗细、字的大小、字的类型、字的颜色(按比例设置大小粗细比较好一些)
int minDwDh = Math.min((int)video.get(Videoio.CAP_PROP_FRAME_WIDTH), (int)video.get(Videoio.CAP_PROP_FRAME_HEIGHT));
int thickness = minDwDh / ODConfig.lineThicknessRatio;
double fontSize = minDwDh / ODConfig.fontSizeRatio;
int fontFace = Imgproc.FONT_HERSHEY_SIMPLEX;
Mat img = new Mat();
// 跳帧检测,一般设置为3,毫秒内视频画面变化是不大的,快了无意义,反而浪费性能
int detect_skip = 4;
// 跳帧计数
int detect_skip_index = 1;
// 最新一帧也就是上一帧推理结果
float[][] outputData = null;
//当前最新一帧。上一帧也可以暂存一下
Mat image = new Mat();
Letterbox letterbox = new Letterbox();
OnnxTensor tensor;
// 使用多线程和GPU可以提升帧率,线上项目必须多线程!!!,一个线程拉流,将图像存到[定长]队列或数组或者集合,一个线程模型推理,中间通过变量或者队列交换数据,代码示例仅仅使用单线程
// FFmpeg 推流命令
// FFmpegFrameRecorder recorder = new FFmpegFrameRecorder("rtmp://192.168.137.7:8090/live/a2", 640,480 );
// recorder.setFormat("flv");
// recorder.setFrameRate(30);
// recorder.setGopSize((int) (recorder.getFrameRate() * 2));
// recorder.setVideoBitrate(500 * 1024);
// recorder.start();
// 实例化FFmpegFrameRecorder,将SRS的推送地址传入
FrameRecorder recorder = FrameRecorder.createDefault("rtmp://192.168.137.7:8090/live/a2", 640, 480);
// 降低启动时的延时,参考
// https://trac.ffmpeg.org/wiki/StreamingGuide)
recorder.setVideoOption("tune", "zerolatency");
recorder.setVideoOption("preset", "ultrafast");
// Constant Rate Factor (see: https://trac.ffmpeg.org/wiki/Encode/H.264)
recorder.setVideoOption("crf", "21");
// 2000 kb/s, reasonable "sane" area for 720
recorder.setVideoBitrate(1000000);
// 设置编码格式
recorder.setVideoCodec(avcodec.AV_CODEC_ID_H264);
// 设置封装格式
recorder.setFormat("flv");
// FPS (frames per second)
// 一秒内的帧数
recorder.setFrameRate(15);
// Key frame interval, in our case every 2 seconds -> 30 (fps) * 2 = 60
// 关键帧间隔
recorder.setGopSize((int) (recorder.getFrameRate() * 2));
// 帧录制器开始初始化
recorder.start();
// 创建转换器
OpenCVFrameConverter.ToIplImage converter = new OpenCVFrameConverter.ToIplImage();
// 假设 converter 有一个设置分辨率的方法
while (video.read(img)) {
if ((detect_skip_index % detect_skip == 0) || outputData == null){
image = img.clone();
image = letterbox.letterbox(image);
Imgproc.cvtColor(image, image, Imgproc.COLOR_BGR2RGB);
image.convertTo(image, CvType.CV_32FC1, 1. / 255);
float[] whc = new float[3 * 640 * 640];
image.get(0, 0, whc);
float[] chw = ImageUtil.whc2cwh(whc);
detect_skip_index = 1;
FloatBuffer inputBuffer = FloatBuffer.wrap(chw);
tensor = OnnxTensor.createTensor(environment, inputBuffer, new long[]{1, 3, 640, 640});
HashMap<String, OnnxTensor> stringOnnxTensorHashMap = new HashMap<>();
stringOnnxTensorHashMap.put(session.getInputInfo().keySet().iterator().next(), tensor);
// 运行推理
// 模型推理本质是多维矩阵运算,而GPU是专门用于矩阵运算,占用率低,如果使用cpu也可以运行,可能占用率100%属于正常现象,不必纠结。
OrtSession.Result output = session.run(stringOnnxTensorHashMap);
// 得到结果,缓存结果
try{
outputData = (float[][]) output.get(0).getValue();
}catch (OrtException e){}
}else{
detect_skip_index = detect_skip_index + 1;
}
for(float[] x : outputData){
ODResult odResult = new ODResult(x);
// 业务逻辑写在这里,注释下面代码,增加自己的代码,根据返回识别到的目标类型,编写告警逻辑。等等
// 实际项目中建议不要在视频画面上画框和文字,只告警,或者在告警图片上画框。画框和文字对视频帧率影响非常大
// 画框
Point topLeft = new Point((odResult.getX0() - letterbox.getDw()) / letterbox.getRatio(), (odResult.getY0() - letterbox.getDh()) / letterbox.getRatio());
Point bottomRight = new Point((odResult.getX1() - letterbox.getDw()) / letterbox.getRatio(), (odResult.getY1() - letterbox.getDh()) / letterbox.getRatio());
Scalar color = new Scalar(odConfig.getOtherColor(odResult.getClsId()));
Imgproc.rectangle(img, topLeft, bottomRight, color, thickness);
// 框上写文字
String boxName = labels[odResult.getClsId()];
Point boxNameLoc = new Point((odResult.getX0() - letterbox.getDw()) / letterbox.getRatio(), (odResult.getY0() - letterbox.getDh()) / letterbox.getRatio() - 3);
// 也可以二次往视频画面上叠加其他文字或者数据,比如物联网设备数据等等
Imgproc.putText(img, boxName, boxNameLoc, fontFace, 0.7, color, thickness);
// System.out.println(odResult+" "+ boxName);
}
// 保存告警图像到同级目录
// Imgcodecs.imwrite(ODConfig.savePicPath, img);
//服务器部署:由于服务器没有桌面,所以无法弹出画面预览,主要注释一下代码
HighGui.imshow("result", img);
// 多次按任意按键关闭弹窗画面,结束程序
if(HighGui.waitKey(1) != -1){
break;
}
image.convertTo(image, CvType.CV_8UC3);
// 将Mat转为Frame
Frame recordedFrame = converter.convert(image);
recorder.record(recordedFrame);
}
recorder.stop();
HighGui.destroyAllWindows();
video.release();
System.exit(0);
}
拉流查看的代码:
package run;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.boot.builder.SpringApplicationBuilder;
import org.springframework.context.annotation.Configuration;
import utils.PullStream;
/**
* @ClassName BroadcastApplication
* @Description : 启动类
* @Author : R
* @Date : 2020/8/17 17:18
*/
@SpringBootApplication
@Configuration
public class PullApplication {
public static void main(String[] args) throws Exception {
SpringApplicationBuilder builder = new SpringApplicationBuilder(PullApplication.class);
builder.headless(false).run(args);
System.out.println("---------------启动成功---------------");
//rtmp服务器拉流地址
// String inputPath = "rtmp://127.0.0.1/live/stream";
// String inputPath = "http://192.168.137.7:9091/live_hls/a1.m3u8";
// String inputPath = "http://192.168.137.7:9091/live_hls/a2.m3u8";
String inputPath = "http://192.168.137.7:8091/live_hls/a2.m3u8";
PullStream pullStream = new PullStream();
pullStream.getPullStream(inputPath);
}
}
package utils;
import org.bytedeco.javacpp.opencv_core;
import org.bytedeco.javacv.CanvasFrame;
import org.bytedeco.javacv.FFmpegFrameGrabber;
import org.bytedeco.javacv.Frame;
import org.bytedeco.javacv.OpenCVFrameConverter;
import javax.swing.*;
public class PullStream {
/**
* 播流器
* @param inputPath rtmp服务器地址
* @throws Exception
* @throws org.bytedeco.javacv.FrameRecorder.Exception
*/
public void getPullStream(String inputPath) throws Exception, org.bytedeco.javacv.FrameRecorder.Exception {
//创建+设置采集器
FFmpegFrameGrabber grabber = FFmpegFrameGrabber.createDefault(inputPath);
grabber.setOption("rtsp_transport", "tcp");
// grabber.setImageWidth(960);
// grabber.setImageHeight(540);
grabber.setImageWidth(0); // 设置为0以自动获取宽度
grabber.setImageHeight(0); // 设置为0以自动获取高度
//开启采集器
grabber.start();
//直播播放窗口
CanvasFrame canvasFrame = new CanvasFrame("直播------来自"+inputPath);
canvasFrame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
canvasFrame.setAlwaysOnTop(true);
OpenCVFrameConverter.ToMat converter = new OpenCVFrameConverter.ToMat();
//播流
while (true){
Frame frame = grabber.grabImage(); //拉流
opencv_core.Mat mat = converter.convertToMat(frame);
canvasFrame.showImage(frame); //播放
}
}
}