Table of Contents
- 1. About JavaCV
- ~~2. [Download the latest OpenCV 4.8 from the official site](https://opencv.org/releases/) and unzip it~~ *installing OpenCV is not strictly required*
- ~~3. Add the OpenCV jar and native DLL to the project~~
- 4. Adding the JavaCV dependency in pom.xml
- 5. Tests
- 5.1 Image beautification
- 5.2 Face detection in an image
- 5.3 Extracting audio from a video
- 5.4 Audio/video clipping
- 5.5 Screen recording
- 5.6 Push streaming and stream playback [reference](https://xinchen.blog.csdn.net/article/details/121434969)
- 5.7 Several camera examples [reference](https://xinchen.blog.csdn.net/article/details/121572093)
- 5.7.1 Saving camera video as MP4
- 5.7.2 Capturing snapshots from the camera
- 5.7.3 Pushing the camera stream
- 5.8 Face recognition training and prediction
- 5.8.1 Training a face recognition model with JavaCV
- 5.8.2 Predicting face photos with the model
- 5.8.3 Switching from image files to camera capture for live detection and recognition
1. About JavaCV
JavaCV is built on OpenCV and is used for image and audio/video processing, video capture, RTMP/HLS pulling and pushing of media streams, and machine-learning tasks such as image recognition and face recognition. Python may implement some of these features better or be a more natural fit, but JavaCV still works quite well.
2. Download the latest OpenCV 4.8 from the official site and unzip it (installing OpenCV is not strictly required)
3. Add the OpenCV jar and native DLL to the project
① E:\opencv\build\java\opencv-480.jar
You can install this jar into your local Maven repository with a Maven command (see the sketch below), or add it in IDEA via Settings -> Project Structure -> Libraries -> "+" and select the jar.
② E:\opencv\build\java\x64\opencv_java480.dll
Copy it into a directory on the native library search path, such as C:\Windows\System32,
or make it available through an environment variable, or load it directly in code: System.load("E:\\opencv\\build\\java\\x64\\opencv_java480.dll");
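For reference, a sketch of the Maven command that installs the jar into the local repository. The groupId/artifactId/version coordinates below are arbitrary placeholders of my choosing, not from the article; use whatever coordinates you plan to reference from your pom.

```
mvn install:install-file -Dfile=E:\opencv\build\java\opencv-480.jar -DgroupId=org.opencv -DartifactId=opencv -Dversion=4.8.0 -Dpackaging=jar
```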
4. Adding the JavaCV dependency in pom.xml
```xml
<dependency>
    <groupId>org.bytedeco</groupId>
    <artifactId>javacv-platform</artifactId>
    <version>1.5.9</version>
</dependency>
```
5. Tests
5.1 Image beautification
```java
package cv;

import org.bytedeco.opencv.opencv_core.Mat;

import java.io.File;

import static org.bytedeco.opencv.global.opencv_imgcodecs.imread;
import static org.bytedeco.opencv.global.opencv_imgcodecs.imwrite;
import static org.bytedeco.opencv.global.opencv_imgproc.bilateralFilter;

public class Meiyan {
    public static void main(String[] args) {
        Mat result = new Mat();
        Mat image = imread("D:\\dayun.jpg");
        int level = 18; // the larger the value, the stronger the smoothing
        bilateralFilter(image, result, level, level * 2, level / 2);
        File out = new File("out.png");
        imwrite(out.getPath(), result);
    }
}
```
5.2 Face detection in an image
Note: detected faces are outlined with a rectangle, and some faces may be missed. The CascadeClassifier XML file loaded here comes from the OpenCV installation package; any copy found elsewhere will do.
```java
package cv;

import org.bytedeco.opencv.opencv_core.*;
import org.bytedeco.opencv.opencv_objdetect.CascadeClassifier;

import static org.bytedeco.opencv.global.opencv_imgcodecs.imread;
import static org.bytedeco.opencv.global.opencv_imgcodecs.imwrite;
import static org.bytedeco.opencv.global.opencv_imgproc.LINE_8;
import static org.bytedeco.opencv.global.opencv_imgproc.rectangle;

public class FaceDetector {
    public static void main(String[] args) {
        // Load the image
        Mat image = imread("D://meinv.jpeg");
        // Load the face cascade classifier
        CascadeClassifier faceCascade = new CascadeClassifier(
                "E:\\opencv\\sources\\data\\haarcascades\\haarcascade_frontalface_alt.xml");
        // Detect faces in the image
        RectVector faceDetections = new RectVector();
        faceCascade.detectMultiScale(image, faceDetections);
        // Draw a rectangle around each detected face
        for (Rect rect : faceDetections.get()) {
            rectangle(image,
                    new Point(rect.x(), rect.y()),
                    new Point(rect.x() + rect.width(), rect.y() + rect.height()),
                    new Scalar(0, 255, 0, 0), 2, LINE_8, 0);
        }
        // Save the image with the detected faces
        imwrite("face.jpg", image);
    }
}
```
5.3 Extracting audio from a video
```java
package cv;

import org.bytedeco.javacv.FFmpegFrameGrabber;
import org.bytedeco.javacv.FFmpegFrameRecorder;
import org.bytedeco.javacv.Frame;

import java.io.File;
import java.util.UUID;

public class MP4ToAudio {

    public static void mp4ToAudio(String sourceFilePath) {
        System.out.println("Extracting the audio track");
        File file = new File(sourceFilePath);
        // Grab the source
        FFmpegFrameGrabber frameGrabber1 = new FFmpegFrameGrabber(sourceFilePath);
        Frame frame = null;
        FFmpegFrameRecorder recorder = null;
        String fileName = null;
        try {
            frameGrabber1.start();
            fileName = file.getAbsolutePath() + UUID.randomUUID() + ".mp3";
            System.out.println("--output file-->>" + fileName);
            recorder = new FFmpegFrameRecorder(fileName, frameGrabber1.getAudioChannels());
            recorder.setFormat("mp3");
            recorder.setSampleRate(frameGrabber1.getSampleRate());
            recorder.setTimestamp(frameGrabber1.getTimestamp());
            recorder.setAudioQuality(0);
            recorder.start();
            int index = 0;
            while (true) {
                frame = frameGrabber1.grab();
                if (frame == null) {
                    System.out.println("Finished processing the video");
                    break;
                }
                // Only record frames that carry audio samples
                if (frame.samples != null) {
                    recorder.recordSamples(frame.sampleRate, frame.audioChannels, frame.samples);
                }
                System.out.println("frame index=" + index);
                index++;
            }
            recorder.stop();
            recorder.release();
            frameGrabber1.stop();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    public static void main(String[] args) {
        String sourceFilePath = "D://test.mp4";
        mp4ToAudio(sourceFilePath);
    }
}
```
5.4 Audio/video clipping
The following uses the third-party tool ffmpeg.exe (not JavaCV) to process audio and video. If a tool such as JianYing is installed, you can point directly to its bundled ffmpeg.exe.
```java
package cv;

import java.io.File;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class CvCutter {

    // Path to ffmpeg.exe; the previous article (video-to-MP4 conversion) links a copy that can be downloaded directly
    private static String ffmpegEXE = "F:\\JianYing\\bin\\ffmpeg.exe";
    // "mp4" added so the video branch also handles mp4 files
    private static List<String> VIDEO_LIST = Arrays.asList("mp4", "mov", "mpg", "wmv", "3gp", "asf", "asx", "avi", "wmv9", "rm", "rmvb", "flv");
    private static List<String> AUDIO_LIST = Arrays.asList("mp3", "acm", "wav", "wma", "mp1", "aif");

    public static Boolean cutVideoOrAudio(String src, String start, String end, String dest) throws Exception {
        File file = new File(dest);
        if (file.exists()) {
            return false;
        }
        if (!file.getParentFile().isDirectory()) {
            file.getParentFile().mkdirs();
        }
        List<String> command = getCommonList(src, start, end, dest);
        ProcessBuilder builder = new ProcessBuilder();
        Process process = builder.command(command).redirectErrorStream(true).start();
        process.waitFor();
        process.destroy();
        return true;
    }

    public static List<String> getCommonList(String src, String start, String end, String dest) {
        String suffix = src.substring(src.lastIndexOf(".") + 1);
        List<String> command = new ArrayList<>();
        if (VIDEO_LIST.contains(suffix)) {
            command.add(ffmpegEXE);
            command.add("-ss");
            command.add(start);
            command.add("-to");
            command.add(end);
            command.add("-i");
            command.add(src);
            command.add("-c:v");
            command.add("libx264");
            command.add("-c:a");
            command.add("aac");
            command.add("-strict");
            command.add("experimental");
            command.add("-b:a");
            command.add("98k");
            command.add(dest);
            command.add("-y");
        } else if (AUDIO_LIST.contains(suffix)) {
            command.add(ffmpegEXE);
            command.add("-i");
            command.add(src);
            command.add("-ss");
            command.add(start);
            command.add("-to");
            command.add(end);
            command.add(dest);
            command.add("-y");
        } else {
            throw new RuntimeException("unknown format");
        }
        return command;
    }

    public static void main(String[] args) throws Exception {
        String input = "D:\\test.mp3";
        String out = "D:\\part.mp3";
        String suffix = input.substring(input.lastIndexOf(".") + 1);
        System.out.println(suffix);
        String start = "00:00:10";
        String end = "00:00:20";
        CvCutter.cutVideoOrAudio(input, start, end, out);
    }
}
```
5.5 Screen recording
```java
package cv;

import org.bytedeco.ffmpeg.global.avcodec;
import org.bytedeco.javacv.FFmpegFrameRecorder;
import org.bytedeco.javacv.Java2DFrameConverter;

import java.awt.*;
import java.awt.image.BufferedImage;
import java.time.LocalDateTime;
import java.time.temporal.ChronoUnit;

/**
 * Screen recording
 *
 * @author majun
 * @version 1.0
 * @since 2023-10-11 20:40
 */
public class ScreenRecord {

    /**
     * Record the screen
     * @param filename output file name
     * @param seconds  recording duration in seconds
     */
    public static void recordScreen(String filename, int seconds) {
        final int FRAME_RATE = 30;
        final Dimension SCREEN_SIZE = Toolkit.getDefaultToolkit().getScreenSize();
        // Create the recorder and set its properties
        FFmpegFrameRecorder recorder = new FFmpegFrameRecorder(filename, SCREEN_SIZE.width, SCREEN_SIZE.height);
        recorder.setVideoCodec(avcodec.AV_CODEC_ID_H264);
        recorder.setFormat("mp4");
        recorder.setFrameRate(FRAME_RATE);
        Java2DFrameConverter converter = new Java2DFrameConverter();
        try {
            // Initialize the recorder
            recorder.start();
            Robot robot = new Robot();
            BufferedImage screenShot;
            // Current system time
            LocalDateTime now = LocalDateTime.now();
            System.out.println(now);
            // Time at which recording should stop, `seconds` seconds later
            LocalDateTime plus = now.plus(seconds, ChronoUnit.SECONDS);
            System.out.println(plus);
            // Start recording
            while (true) {
                // Take a screenshot and write it to the file
                screenShot = robot.createScreenCapture(new Rectangle(SCREEN_SIZE));
                recorder.record(converter.getFrame(screenShot));
                // Stop once the end time has passed
                LocalDateTime time = LocalDateTime.now();
                if (plus.isBefore(time)) {
                    System.out.println(time);
                    break;
                }
            }
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            // Close the recorder
            try {
                recorder.stop();
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    }

    public static void main(String[] args) {
        recordScreen("screen.mp4", 10);
    }
}
```
5.6 Push streaming and stream playback ([reference](https://xinchen.blog.csdn.net/article/details/121434969))
First start an SRS streaming server: docker run -p 1935:1935 -p 1985:1985 -p 8080:8080 ossrs/srs
Then run the push code below. Finally, open VLC media player (Ctrl+N) and play rtmp://192.168.72.126:1935/live/livestream (the same address used for pushing) to pull the stream from SRS.
```java
package cv;

import lombok.extern.slf4j.Slf4j;
import org.bytedeco.ffmpeg.avcodec.AVCodecParameters;
import org.bytedeco.ffmpeg.avformat.AVFormatContext;
import org.bytedeco.ffmpeg.avformat.AVStream;
import org.bytedeco.ffmpeg.global.avcodec;
import org.bytedeco.ffmpeg.global.avutil;
import org.bytedeco.javacv.FFmpegFrameGrabber;
import org.bytedeco.javacv.FFmpegFrameRecorder;
import org.bytedeco.javacv.FFmpegLogCallback;
import org.bytedeco.javacv.Frame;

/**
 * @author willzhao
 * @version 1.0
 * @description Read the specified mp4 file and push it to the SRS server
 * @date 2021/11/19 8:49
 */
@Slf4j
public class PushMp4 {

    private static final String MP4_FILE_PATH = "D://test.mp4";

    /**
     * SRS push address
     */
    private static final String SRS_PUSH_ADDRESS = "rtmp://192.168.72.126:1935/live/livestream";

    /**
     * Read the specified mp4 file and push it to the SRS server
     * @param sourceFilePath absolute path of the video file
     * @param PUSH_ADDRESS   push address
     * @throws Exception
     */
    private static void grabAndPush(String sourceFilePath, String PUSH_ADDRESS) throws Exception {
        // ffmpeg log level
        avutil.av_log_set_level(avutil.AV_LOG_INFO);
        FFmpegLogCallback.set();

        // Instantiate the frame grabber with the file path
        FFmpegFrameGrabber grabber = new FFmpegFrameGrabber(MP4_FILE_PATH);

        long startTime = System.currentTimeMillis();
        log.info("Initializing the frame grabber");
        // Initialize the frame grabber's data structures (timestamps, codec context, frame objects, etc.).
        // If the argument is true it also calls avformat_find_stream_info to collect stream info
        // into the AVFormatContext member variable oc
        grabber.start(true);
        log.info("Frame grabber initialized in [{}] ms", System.currentTimeMillis() - startTime);

        // The decoder info initialized in grabber.start() lives in the grabber's member variable oc
        AVFormatContext avFormatContext = grabber.getFormatContext();
        // Number of media streams in the file (usually one video stream plus one audio stream)
        int streamNum = avFormatContext.nb_streams();
        // Nothing to do if there are no media streams
        if (streamNum < 1) {
            log.error("No media streams found in the file");
            return;
        }

        // Video frame rate
        int frameRate = (int) grabber.getVideoFrameRate();
        log.info("Frame rate [{}], duration [{}] s, stream count [{}]",
                frameRate,
                avFormatContext.duration() / 1000000,
                avFormatContext.nb_streams());

        // Iterate over each stream and check its type
        for (int i = 0; i < streamNum; i++) {
            AVStream avStream = avFormatContext.streams(i);
            AVCodecParameters avCodecParameters = avStream.codecpar();
            log.info("Stream index [{}], codec type [{}], codec id [{}]",
                    i, avCodecParameters.codec_type(), avCodecParameters.codec_id());
        }

        // Video width
        int frameWidth = grabber.getImageWidth();
        // Video height
        int frameHeight = grabber.getImageHeight();
        // Number of audio channels
        int audioChannels = grabber.getAudioChannels();
        log.info("Width [{}], height [{}], audio channels [{}]", frameWidth, frameHeight, audioChannels);

        // Instantiate FFmpegFrameRecorder with the SRS push address
        FFmpegFrameRecorder recorder = new FFmpegFrameRecorder(SRS_PUSH_ADDRESS, frameWidth, frameHeight, audioChannels);
        // Video codec
        recorder.setVideoCodec(avcodec.AV_CODEC_ID_H264);
        // Container format
        recorder.setFormat("flv");
        // Frames per second
        recorder.setFrameRate(frameRate);
        // Number of frames between two key frames
        recorder.setGopSize(frameRate);
        // Audio channel count, same as the source
        recorder.setAudioChannels(grabber.getAudioChannels());

        startTime = System.currentTimeMillis();
        log.info("Initializing the frame recorder");
        // Initialize the frame recorder: its data structures (audio/video stream pointers, encoders),
        // call av_guess_format to determine the output container format,
        // allocate the media context, and set the encoder parameters
        recorder.start();
        log.info("Frame recorder initialized in [{}] ms", System.currentTimeMillis() - startTime);

        Frame frame;
        startTime = System.currentTimeMillis();
        log.info("Start pushing");
        long videoTS = 0;
        int videoFrameNum = 0;
        int audioFrameNum = 0;
        int dataFrameNum = 0;

        // At, say, 15 frames per second, the interval between two frames is (1000/15) ms
        int interVal = 1000 / frameRate;
        // The sleep after each frame must be shorter than (1000/frameRate) or playback stutters;
        // here one eighth of the interval is used
        interVal /= 8;

        // Keep grabbing frames from the source
        while (null != (frame = grabber.grab())) {
            videoTS = 1000 * (System.currentTimeMillis() - startTime);
            // Timestamp
            recorder.setTimestamp(videoTS);
            // Count video frames
            if (null != frame.image) {
                videoFrameNum++;
            }
            // Count audio frames
            if (null != frame.samples) {
                audioFrameNum++;
            }
            // Count data frames
            if (null != frame.data) {
                dataFrameNum++;
            }
            // Push every grabbed frame to SRS
            recorder.record(frame);
            // Pause briefly before pushing the next frame
            Thread.sleep(interVal);
        }

        log.info("Push finished, video frames [{}], audio frames [{}], data frames [{}], elapsed [{}] s",
                videoFrameNum, audioFrameNum, dataFrameNum, (System.currentTimeMillis() - startTime) / 1000);

        // Close the frame recorder
        recorder.close();
        // Close the frame grabber
        grabber.close();
    }

    public static void main(String[] args) throws Exception {
        grabAndPush(MP4_FILE_PATH, SRS_PUSH_ADDRESS);
    }
}
```
5.7 Several camera examples ([reference](https://xinchen.blog.csdn.net/article/details/121572093))
If you don't have a webcam, a phone can serve as one. Roughly: install the WuTa camera app on the phone and the WuTa companion app on the PC; on the phone open "About phone", tap the system version repeatedly to enable developer mode, turn on USB debugging, connect the USB cable and choose "Transfer files"; then open the live-streaming assistant in the phone's WuTa camera, and in the PC companion select the detected phone and click sync. The examples that follow all extend the abstract base class below.
```java
package cv; // kept in the same package as the subclasses below (the original used com.bolingcavalry.grabpush.camera)

import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.bytedeco.ffmpeg.global.avutil;
import org.bytedeco.javacv.*;
import org.bytedeco.opencv.global.opencv_imgproc;
import org.bytedeco.opencv.opencv_core.Mat;
import org.bytedeco.opencv.opencv_core.Scalar;

import java.text.SimpleDateFormat;
import java.util.Date;

/**
 * @author will
 * @email zq2599@gmail.com
 * @date 2021/11/19 8:07
 * @description Base class for the camera examples. It defines the basic grab-and-output flow;
 *              subclasses only implement the concrete output logic.
 */
@Slf4j
public abstract class AbstractCameraApplication {

    /**
     * Camera index; 0 if there is only one camera
     */
    protected static final int CAMERA_INDEX = 0;

    /**
     * Frame grabber
     */
    protected FrameGrabber grabber;

    /**
     * Output frame rate
     */
    @Getter
    private final double frameRate = 30;

    /**
     * Width of the camera video
     */
    @Getter
    private final int cameraImageWidth = 1280;

    /**
     * Height of the camera video
     */
    @Getter
    private final int cameraImageHeight = 720;

    /**
     * Converter
     */
    private final OpenCVFrameConverter.ToIplImage openCVConverter = new OpenCVFrameConverter.ToIplImage();

    /**
     * Instantiate and initialize the output resources
     */
    protected abstract void initOutput() throws Exception;

    /**
     * Output a frame
     */
    protected abstract void output(Frame frame) throws Exception;

    /**
     * Release the output resources
     */
    protected abstract void releaseOutputResource() throws Exception;

    /**
     * Interval between two frames
     * @return
     */
    protected int getInterval() {
        // e.g. at 15 frames per second, the interval between two frames is (1000/15) ms
        return (int) (1000 / frameRate);
    }

    /**
     * Instantiate the frame grabber; defaults to an OpenCVFrameGrabber,
     * subclasses can override as needed
     * @throws FrameGrabber.Exception
     */
    protected void instanceGrabber() throws FrameGrabber.Exception {
        grabber = new OpenCVFrameGrabber(CAMERA_INDEX);
    }

    /**
     * Grab one frame; calls grab() by default, subclasses can override as needed
     * @return
     */
    protected Frame grabFrame() throws FrameGrabber.Exception {
        return grabber.grab();
    }

    /**
     * Initialize the frame grabber
     * @throws Exception
     */
    protected void initGrabber() throws Exception {
        // Instantiate the frame grabber
        instanceGrabber();
        // The camera may support several resolutions; one is specified here.
        // You can also skip this and call grabber.getImageWidth() instead.
        grabber.setImageWidth(cameraImageWidth);
        grabber.setImageHeight(cameraImageHeight);
        // Start the grabber
        grabber.start();
    }

    /**
     * Preview and output
     * @param grabSeconds duration in seconds
     * @throws Exception
     */
    private void grabAndOutput(int grabSeconds) throws Exception {
        // Date formatter used for the watermark
        SimpleDateFormat simpleDateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
        long endTime = System.currentTimeMillis() + 1000L * grabSeconds;
        // Interval between two output frames; defaults to 1000 divided by the frame rate, subclasses may adjust
        int interVal = getInterval();
        // Position of the watermark on the image
        org.bytedeco.opencv.opencv_core.Point point = new org.bytedeco.opencv.opencv_core.Point(15, 35);
        Frame captureFrame;
        Mat mat;
        // Loop until the specified time has elapsed
        while (System.currentTimeMillis() < endTime) {
            // Grab one frame
            captureFrame = grabFrame();
            if (null == captureFrame) {
                log.error("Grabbed frame is null");
                break;
            }
            // Convert the frame to a Mat
            mat = openCVConverter.convertToMat(captureFrame);
            // Add a watermark with the current time in the top-left corner
            opencv_imgproc.putText(mat,
                    simpleDateFormat.format(new Date()),
                    point,
                    opencv_imgproc.CV_FONT_VECTOR0,
                    0.8,
                    new Scalar(0, 200, 255, 0),
                    1,
                    0,
                    false);
            // Let the subclass output the frame
            output(openCVConverter.convert(mat));
            // Pause just long enough that the preview does not visibly flicker
            if (interVal > 0) {
                Thread.sleep(interVal);
            }
        }
        log.info("Output finished");
    }

    /**
     * Release all resources
     */
    private void safeRelease() {
        try {
            // Resources the subclass needs to release
            releaseOutputResource();
        } catch (Exception exception) {
            log.error("do releaseOutputResource error", exception);
        }
        if (null != grabber) {
            try {
                grabber.close();
            } catch (Exception exception) {
                log.error("close grabber error", exception);
            }
        }
    }

    /**
     * All initialization steps
     * @throws Exception
     */
    private void init() throws Exception {
        long startTime = System.currentTimeMillis();
        // ffmpeg log level
        avutil.av_log_set_level(avutil.AV_LOG_INFO);
        FFmpegLogCallback.set();
        // Instantiate and initialize the frame grabber
        initGrabber();
        // Instantiate and initialize the output resources;
        // what "output" means is up to the subclass, e.g. preview window, video file, etc.
        initOutput();
        log.info("Initialization finished in [{}] ms, frame rate [{}], image width [{}], image height [{}]",
                System.currentTimeMillis() - startTime,
                frameRate,
                cameraImageWidth,
                cameraImageHeight);
    }

    /**
     * Run the grab-and-output flow
     */
    public void action(int grabSeconds) {
        try {
            // Initialization
            init();
            // Keep grabbing and outputting
            grabAndOutput(grabSeconds);
        } catch (Exception exception) {
            log.error("execute action error", exception);
        } finally {
            // Always release resources
            safeRelease();
        }
    }
}
```
5.7.1 Saving camera video as MP4
```java
package cv;

import org.bytedeco.ffmpeg.global.avcodec;
import org.bytedeco.javacv.FFmpegFrameRecorder;
import org.bytedeco.javacv.Frame;
import org.bytedeco.javacv.FrameRecorder;

import static org.bytedeco.ffmpeg.global.avutil.AV_PIX_FMT_YUV420P;

/**
 * Saves the camera video to an mp4 file
 *
 * @author majun
 * @version 1.0
 * @since 2023-10-11 22:13
 */
public class CameraMp4Recorder extends AbstractCameraApplication {

    protected FrameRecorder recorder;

    @Override
    protected void initOutput() throws Exception {
        // Instantiate FFmpegFrameRecorder
        recorder = new FFmpegFrameRecorder("CameraMp4Recorder.mp4", // output file location
                getCameraImageWidth(),  // width, same as the video source
                getCameraImageHeight(), // height, same as the video source
                0);                     // audio channels, 0 means none
        // Container format
        recorder.setFormat("mp4");
        // Frame rate, same as the grabber
        recorder.setFrameRate(getFrameRate());
        // Pixel format
        recorder.setPixelFormat(AV_PIX_FMT_YUV420P);
        // Codec
        recorder.setVideoCodec(avcodec.AV_CODEC_ID_MPEG4);
        // Video quality, 0 means lossless
        recorder.setVideoQuality(0);
        // Initialize
        recorder.start();
    }

    @Override
    protected void output(Frame frame) throws Exception {
        recorder.record(frame);
    }

    @Override
    protected void releaseOutputResource() throws Exception {
        recorder.close();
    }

    public static void main(String[] args) {
        new CameraMp4Recorder().action(10);
    }
}
```
5.7.2 Capturing snapshots from the camera
```java
package cv;

import lombok.extern.slf4j.Slf4j;
import org.bytedeco.javacv.Frame;
import org.bytedeco.javacv.Java2DFrameConverter;

import javax.imageio.ImageIO;
import java.awt.image.BufferedImage;
import java.io.FileOutputStream;

/**
 * Grabs still images from the camera
 *
 * @author majun
 * @version 1.0
 * @since 2023-10-11 22:35
 */
@Slf4j
public class CameraImageGraber extends AbstractCameraApplication {

    private Java2DFrameConverter converter = new Java2DFrameConverter();

    @Override
    protected void initOutput() throws Exception {
    }

    @Override
    protected void output(Frame frame) throws Exception {
        // Convert the frame to a BufferedImage and save it as a jpg
        BufferedImage bufferedImage = converter.getBufferedImage(frame);
        ImageIO.write(bufferedImage, "jpg", new FileOutputStream(System.currentTimeMillis() + ".jpg"));
    }

    @Override
    protected void releaseOutputResource() throws Exception {
    }

    @Override
    protected int getInterval() {
        // One snapshot per second
        return 1000;
    }

    public static void main(String[] args) {
        // Grab snapshots for ten seconds
        new CameraImageGraber().action(10);
    }
}
```
5.7.3 Pushing the camera stream
This works much like pushing the local mp4 file to SRS shown earlier.
```java
package cv;

import org.bytedeco.ffmpeg.global.avcodec;
import org.bytedeco.javacv.Frame;
import org.bytedeco.javacv.FrameRecorder;

/**
 * Pushes the camera stream to SRS
 *
 * @author majun
 * @version 1.0
 * @since 2023-10-11 22:52
 */
public class CameraPushSRS extends AbstractCameraApplication {

    private static final String RECORD_ADDRESS = "rtmp://192.168.72.126:1935/hls/camera";

    protected FrameRecorder recorder;

    protected long startRecordTime = 0L;

    @Override
    protected void initOutput() throws Exception {
        // Instantiate FFmpegFrameRecorder with the SRS push address
        recorder = FrameRecorder.createDefault(RECORD_ADDRESS, getCameraImageWidth(), getCameraImageHeight());
        // Reduce startup latency (see https://trac.ffmpeg.org/wiki/StreamingGuide)
        recorder.setVideoOption("tune", "zerolatency");
        // Pick a trade-off between video quality and encoding speed; the options are:
        // ultrafast, superfast, veryfast, faster, fast, medium, slow, slower, veryslow.
        // ultrafast gives the least compression (lowest encoder CPU) at the cost of a larger stream;
        // at the other end, veryslow gives the best compression (highest encoder CPU) and a smaller stream
        // (see https://trac.ffmpeg.org/wiki/Encode/H.264)
        recorder.setVideoOption("preset", "ultrafast");
        // Constant Rate Factor (see https://trac.ffmpeg.org/wiki/Encode/H.264)
        recorder.setVideoOption("crf", "28");
        // 2000 kb/s, a reasonable "sane" value for 720p
        recorder.setVideoBitrate(2000000);
        // Video codec
        recorder.setVideoCodec(avcodec.AV_CODEC_ID_H264);
        // Container format
        recorder.setFormat("flv");
        // Frames per second
        recorder.setFrameRate(getFrameRate());
        // Key frame interval, here every 2 seconds -> 30 (fps) * 2 = 60
        recorder.setGopSize((int) getFrameRate() * 2);
        // Initialize the frame recorder
        recorder.start();
    }

    @Override
    protected void output(Frame frame) throws Exception {
        if (0L == startRecordTime) {
            startRecordTime = System.currentTimeMillis();
        }
        recorder.setTimestamp(1000 * (System.currentTimeMillis() - startRecordTime));
        recorder.record(frame);
    }

    @Override
    protected void releaseOutputResource() throws Exception {
        recorder.close();
    }

    @Override
    protected int getInterval() {
        // When pushing a stream, use a shorter interval between frames than for local preview
        return super.getInterval() / 4;
    }

    public static void main(String[] args) {
        new CameraPushSRS().action(10);
    }
}
```
5.8 Face recognition training and prediction
A typical scenario is a company's door access system: JavaCV trains a model on employees' face photos, and when the camera captures a face the model predicts whether it belongs to an employee. The pom.xml used for the following examples:
```xml
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <parent>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-parent</artifactId>
        <version>2.7.9</version>
        <relativePath/> <!-- lookup parent from repository -->
    </parent>
    <groupId>com.example</groupId>
    <artifactId>demo</artifactId>
    <version>0.0.1-SNAPSHOT</version>
    <name>demo</name>
    <description>demo</description>
    <properties>
        <java.version>17</java.version>
    </properties>
    <dependencies>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter</artifactId>
        </dependency>
        <dependency>
            <groupId>org.projectlombok</groupId>
            <artifactId>lombok</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-test</artifactId>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.bytedeco</groupId>
            <artifactId>javacv-platform</artifactId>
            <version>1.5.9</version>
        </dependency>
    </dependencies>
    <build>
        <plugins>
            <plugin>
                <groupId>org.springframework.boot</groupId>
                <artifactId>spring-boot-maven-plugin</artifactId>
            </plugin>
        </plugins>
    </build>
</project>
```
5.8.1 Training a face recognition model with JavaCV
```java
package cv;

import lombok.SneakyThrows;
import org.bytedeco.opencv.global.opencv_imgcodecs;
import org.bytedeco.opencv.opencv_core.Mat;
import org.bytedeco.opencv.opencv_core.MatVector;
import org.bytedeco.opencv.opencv_core.Size;
import org.bytedeco.opencv.opencv_face.FisherFaceRecognizer;

import java.nio.IntBuffer;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.concurrent.atomic.AtomicInteger;

import static org.bytedeco.opencv.global.opencv_core.CV_32SC1;
import static org.bytedeco.opencv.global.opencv_imgproc.*;

public class Training {

    @SneakyThrows
    public static void main(String[] args) {
        // 30 photos of Liu Dehua (Andy Lau) found online go into D:\1 and 30 photos of Liu Yifei into D:\2.
        // Use good-quality photos, the more the better, or use an open face detection/recognition dataset.
        int imageNum = 60;
        // Holds the 60 image matrices
        MatVector images = new MatVector(imageNum);
        Mat lables = new Mat(imageNum, 1, CV_32SC1);
        IntBuffer lablesBuf = lables.createBuffer();
        AtomicInteger counter = new AtomicInteger(0);
        // Read the images from both folders, resize them and convert to grayscale;
        // the folder name is used as the training label
        for (String dir : Arrays.asList("D:\\1", "D:\\2")) {
            Files.list(Paths.get(dir))
                    .map(path -> opencv_imgcodecs.imread(path.toFile().getAbsolutePath(), 1))
                    .forEachOrdered(mat -> {
                        Mat resizedMat = new Mat();
                        // Resize; images saved from a web image search are roughly 300x400
                        resize(mat, resizedMat, new Size(300, 400));
                        Mat grayMat = new Mat();
                        // Grayscale
                        cvtColor(resizedMat, grayMat, COLOR_RGB2GRAY);
                        int currentIndex = counter.getAndIncrement();
                        images.put(currentIndex, grayMat);
                        lablesBuf.put(currentIndex, Integer.parseInt(dir.substring(dir.length() - 1)));
                    });
        }
        // Create the face recognizer; Fisher, Eigen and LBPH variants are available
        FisherFaceRecognizer fr = FisherFaceRecognizer.create();
        // Train the face model
        fr.train(images, lables);
        // Save the trained model
        fr.save("faceRecognize.xml");
        fr.close();
    }
}
```
5.8.2 Predicting face photos with the model
```java
package cv;

import lombok.SneakyThrows;
import org.bytedeco.javacpp.DoublePointer;
import org.bytedeco.javacpp.IntPointer;
import org.bytedeco.javacv.CanvasFrame;
import org.bytedeco.javacv.Frame;
import org.bytedeco.javacv.Java2DFrameConverter;
import org.bytedeco.javacv.OpenCVFrameConverter;
import org.bytedeco.opencv.opencv_core.*;
import org.bytedeco.opencv.opencv_face.FisherFaceRecognizer;
import org.bytedeco.opencv.opencv_objdetect.CascadeClassifier;

import javax.imageio.ImageIO;
import javax.swing.*;
import java.awt.image.BufferedImage;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.List;
import java.util.Random;
import java.util.stream.Collectors;

import static org.bytedeco.opencv.global.opencv_imgproc.*;

public class Inference {

    @SneakyThrows
    public static void main(String[] args) {
        // Load the trained model
        FisherFaceRecognizer faceRecognizer = FisherFaceRecognizer.create();
        faceRecognizer.read("faceRecognize.xml");
        // Distance between the input face and the model's faces (labels 1 and 2);
        // only values below this threshold are accepted as a match
        faceRecognizer.setThreshold(1300.0);

        // Create a preview window
        CanvasFrame canvas = new CanvasFrame("Face detection");
        canvas.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
        OpenCVFrameConverter.ToMat convertor = new OpenCVFrameConverter.ToMat(); // used for type conversion

        while (canvas.isEnabled()) {
            Mat grayImage = new Mat();
            Mat face = new Mat();
            // Pick a random test image from D:\2
            List<String> toTests = Files.list(Paths.get("D:\\2"))
                    .map(path -> path.toFile().getAbsolutePath())
                    .collect(Collectors.toList());
            File file = new File(toTests.get(new Random().nextInt(toTests.size())));
            BufferedImage image = ImageIO.read(file);
            Java2DFrameConverter imageConverter = new Java2DFrameConverter();
            Frame imgFrame = imageConverter.convert(image); // type conversion
            OpenCVFrameConverter.ToMat converter = new OpenCVFrameConverter.ToMat();
            Mat scr = converter.convertToMat(imgFrame);
            // A camera delivers color images, so convert to grayscale first
            cvtColor(scr, grayImage, COLOR_RGB2GRAY);

            // Load the OpenCV face detector; change the path to match your installation
            CascadeClassifier cascade = new CascadeClassifier(
                    "E:\\opencv\\sources\\data\\haarcascades\\haarcascade_frontalface_alt.xml");
            // Detect faces
            RectVector faces = new RectVector();
            cascade.detectMultiScale(grayImage, faces);

            IntPointer label = new IntPointer(1);
            DoublePointer confidence = new DoublePointer(1);
            // Recognize each face; one image may contain several faces
            for (int i = 0; i < faces.size(); i++) {
                Rect rect = faces.get(i);
                rectangle(scr, rect, new Scalar(0, 255, 0, 1));
                // Grayscale image cropped to the detected rectangle
                Mat grayImageWithRectangle = new Mat(grayImage, rect);
                // Same size as used when training the model
                resize(grayImageWithRectangle, face, new Size(300, 400));
                faceRecognizer.predict(face, label, confidence);
                int predictedLabel = label.get(0); // prediction result
                System.out.println(predictedLabel);
                System.gc(); // memory usage climbs quickly otherwise

                // Interpret the prediction
                int pos_x = Math.max(rect.tl().x() - 10, 0);
                int pos_y = Math.max(rect.tl().y() - 10, 0);
                putText(scr, predictedLabel == 1 ? "LDF" : predictedLabel == 2 ? "LYF" : "Unknown",
                        new Point(pos_x, pos_y), FONT_HERSHEY_PLAIN, 1.0, new Scalar(0, 255, 0, 2.0));
            }
            // Show the image with the rectangles and labels in the window
            Frame frame = convertor.convert(scr);
            canvas.showImage(frame);
            Thread.sleep(100); // refresh every 100 ms
        }
    }
}
```
5.8.3 Switching from image files to camera capture for live detection and recognition
Simply replace the image-file reading with camera capture to detect and recognize faces live:
```java
OpenCVFrameGrabber grabber = new OpenCVFrameGrabber(0);
grabber.setImageWidth(300);
grabber.setImageHeight(400);
grabber.start();
Frame frame = grabber.grab();
```
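Putting that together with 5.8.2, a minimal sketch of the adjusted loop (assuming the faceRecognize.xml model, cascade classifier, canvas and converters are set up exactly as in 5.8.2):

```java
// Minimal sketch: swap the file-based image source in Inference for live camera frames.
OpenCVFrameGrabber grabber = new OpenCVFrameGrabber(0); // camera index 0
grabber.start();
OpenCVFrameConverter.ToMat converter = new OpenCVFrameConverter.ToMat();
while (canvas.isEnabled()) {
    Frame imgFrame = grabber.grab(); // replaces ImageIO.read(...) + Java2DFrameConverter
    if (imgFrame == null) {
        continue;
    }
    Mat scr = converter.convertToMat(imgFrame);
    // ...the rest of the loop (grayscale conversion, detectMultiScale, predict, putText, showImage)
    // stays the same as in 5.8.2
}
grabber.close();
```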