上一篇文章我们分享了java开发人脸采集系统,这次我们分享人脸识别系统,用的还是虹软开放平台。#人工智能##人脸识别##java##python#
原文地址:
一,项目结构
二,人脸录入
主要是通过前端JS调起摄像头拍照片并将照片上传给后端的/faceAdd接口
// Open the user's camera and stream it into a freshly created <video> element
// inside #mainDiv. A hidden <canvas> of the same size is created alongside it
// so takePhoto() can later grab a frame.
function getMedia() {
    $("#mainDiv").empty();
    let videoComp = " <video id='video' width='500px' height='500px' autoplay='autoplay' style='margin-top: 20px'></video><canvas id='canvas' width='500px' height='500px' style='display: none'></canvas>";
    $("#mainDiv").append(videoComp);
    // Only video is captured here; requesting audio as well would trigger an
    // unnecessary microphone permission prompt and make getUserMedia fail
    // outright on machines without a microphone.
    let constraints = {
        video: {width: 500, height: 500},
        audio: false
    };
    // The <video> element that will display the camera stream.
    let video = document.getElementById("video");
    // getUserMedia returns a Promise that resolves with a MediaStream once the
    // user grants permission; attach it to the video element and start playback.
    navigator.mediaDevices.getUserMedia(constraints)
        .then(function (mediaStream) {
            video.srcObject = mediaStream;
            video.play();
        })
        .catch(function (err) {
            // Permission denied or no camera available — surface the error
            // instead of failing silently with a blank video element.
            alert("无法打开摄像头: " + err.name);
        });
}
// Capture handler: uploads a face image to the backend /faceAdd endpoint.
// If a live <video> element is present in #mainDiv, a frame is grabbed from it
// via the hidden canvas; otherwise the file chosen in #file0 is read and
// uploaded. Both paths share the same upload routine.
function takePhoto() {
    let userName = $("#userName").val();
    // Validate the name up front so BOTH the camera and the file-upload paths
    // reject an empty name (previously only the camera path checked it).
    if (userName == "") {
        alert("姓名不能为空!");
        return false;
    }
    // Shared upload routine: posts the base64 image plus user metadata to
    // /faceAdd. Kept synchronous (async: false) as in the original flow so the
    // success/error alert fires before any page reload.
    function uploadFace(base64File, onDone) {
        var formData = new FormData();
        formData.append("file", base64File);
        formData.append("name", userName);
        formData.append("groupId", "101");
        $.ajax({
            type: "post",
            url: "/faceAdd",
            data: formData,
            contentType: false,
            processData: false,
            async: false,
            success: function (text) {
                if (text.code == 0) {
                    alert("注册成功")
                } else {
                    alert(text.message)
                }
            },
            error: function (error) {
                alert(JSON.stringify(error))
            }
        });
        if (onDone) {
            onDone();
        }
    }
    if ($("#mainDiv").has('video').length) {
        // Camera path: draw the current video frame onto the hidden canvas and
        // upload it as a data URL.
        let video = document.getElementById("video");
        let canvas = document.getElementById("canvas");
        let ctx = canvas.getContext('2d');
        ctx.drawImage(video, 0, 0, 500, 500);
        uploadFace(canvas.toDataURL());
    } else {
        // File path: read the selected image as a data URL, then upload.
        var file = $("#file0")[0].files[0];
        if (!file) {
            // Guard against clicking the button with no file chosen, which
            // previously made FileReader throw.
            alert("请先选择图片文件!");
            return false;
        }
        var reader = new FileReader();
        reader.onload = function () {
            uploadFace(reader.result, function () {
                location.reload();
            });
        };
        reader.readAsDataURL(file);
    }
}
下面是后端的Controller层,主要有faceAdd接口
faceAdd方法主要使用了
faceEngineService.extractFaceFeature(imageInfo)//人脸特征获取
另外也使用了
userFaceInfoService.insertSelective(userFaceInfo);//人脸特征插入到数据库
/**
 * Web controller for the ArcSoft face demo: face registration ({@code /faceAdd}),
 * 1:N face search ({@code /faceSearch}) and raw face detection ({@code /detectFaces}).
 * Images arrive as base64 strings (optionally with a {@code data:image/...;base64,}
 * prefix, which {@link #base64Process(String)} strips).
 */
@Controller
public class FaceController {

    public final static Logger logger = LoggerFactory.getLogger(FaceController.class);

    @Autowired
    FaceEngineService faceEngineService;
    @Autowired
    UserFaceInfoService userFaceInfoService;

    @RequestMapping(value = "/demo")
    public String demo() {
        return "demo";
    }

    /*
     * Face registration: decodes the uploaded base64 image, extracts the face
     * feature vector via the engine, and persists it with the user's name,
     * group id and a random face id.
     */
    @RequestMapping(value = "/faceAdd", method = RequestMethod.POST)
    @ResponseBody
    public Result<Object> faceAdd(@RequestParam("file") String file, @RequestParam("groupId") Integer groupId, @RequestParam("name") String name) {
        try {
            if (file == null) {
                return Results.newFailedResult("file is null");
            }
            if (groupId == null) {
                return Results.newFailedResult("groupId is null");
            }
            if (name == null) {
                return Results.newFailedResult("name is null");
            }
            byte[] decode = Base64.decode(base64Process(file));
            ImageInfo imageInfo = ImageFactory.getRGBData(decode);
            // Extract the face feature vector; null means no face was found.
            byte[] bytes = faceEngineService.extractFaceFeature(imageInfo);
            if (bytes == null) {
                return Results.newFailedResult(ErrorCodeEnum.NO_FACE_DETECTED);
            }
            UserFaceInfo userFaceInfo = new UserFaceInfo();
            userFaceInfo.setName(name);
            userFaceInfo.setGroupId(groupId);
            userFaceInfo.setFaceFeature(bytes);
            userFaceInfo.setFaceId(RandomUtil.randomString(10));
            // Persist the feature so /faceSearch can later match against it.
            userFaceInfoService.insertSelective(userFaceInfo);
            logger.info("faceAdd: {}", name);
            return Results.newSuccessResult("");
        } catch (Exception e) {
            // Decoding/engine failures fall through to a generic error result.
            logger.error("faceAdd failed", e);
        }
        return Results.newFailedResult(ErrorCodeEnum.UNKNOWN);
    }

    /*
     * Face search (1:N): extracts the feature of the submitted image, compares
     * it against the stored features of the given group, and — on a match —
     * returns the best candidate plus the input image annotated with a red
     * rectangle around the detected face, age and gender.
     */
    @RequestMapping(value = "/faceSearch", method = RequestMethod.POST)
    @ResponseBody
    public Result<FaceSearchResDto> faceSearch(String file, Integer groupId) throws Exception {
        if (file == null) {
            // Previously missing: a null file reached Base64.decode and blew up.
            return Results.newFailedResult("file is null");
        }
        if (groupId == null) {
            return Results.newFailedResult("groupId is null");
        }
        byte[] decode = Base64.decode(base64Process(file));
        BufferedImage bufImage = ImageIO.read(new ByteArrayInputStream(decode));
        ImageInfo imageInfo = ImageFactory.bufferedImage2ImageInfo(bufImage);
        // Feature extraction; null means no face in the query image.
        byte[] bytes = faceEngineService.extractFaceFeature(imageInfo);
        if (bytes == null) {
            return Results.newFailedResult(ErrorCodeEnum.NO_FACE_DETECTED);
        }
        // Compare against all stored features of this group; the list is
        // assumed ordered best-match-first (engine contract — see service).
        List<FaceUserInfo> userFaceInfoList = faceEngineService.compareFaceFeature(bytes, groupId);
        if (CollectionUtil.isNotEmpty(userFaceInfoList)) {
            FaceUserInfo faceUserInfo = userFaceInfoList.get(0);
            FaceSearchResDto faceSearchResDto = new FaceSearchResDto();
            BeanUtil.copyProperties(faceUserInfo, faceSearchResDto);
            List<ProcessInfo> processInfoList = faceEngineService.process(imageInfo);
            if (CollectionUtil.isNotEmpty(processInfoList)) {
                // Face detection for the bounding box to draw on the response image.
                List<FaceInfo> faceInfoList = faceEngineService.detectFaces(imageInfo);
                // Guard added: process() succeeding does not guarantee
                // detectFaces() returns a non-empty list.
                if (CollectionUtil.isNotEmpty(faceInfoList)) {
                    int left = faceInfoList.get(0).getRect().getLeft();
                    int top = faceInfoList.get(0).getRect().getTop();
                    int width = faceInfoList.get(0).getRect().getRight() - left;
                    int height = faceInfoList.get(0).getRect().getBottom() - top;
                    Graphics2D graphics2D = bufImage.createGraphics();
                    try {
                        graphics2D.setColor(Color.RED);
                        graphics2D.setStroke(new BasicStroke(5f));
                        graphics2D.drawRect(left, top, width, height);
                    } finally {
                        // Release the native graphics resources (was leaked before).
                        graphics2D.dispose();
                    }
                    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
                    ImageIO.write(bufImage, "jpg", outputStream);
                    byte[] bytes1 = outputStream.toByteArray();
                    faceSearchResDto.setImage("data:image/jpeg;base64," + Base64Utils.encodeToString(bytes1));
                }
                faceSearchResDto.setAge(processInfoList.get(0).getAge());
                // ArcSoft gender code: 1 = female, otherwise male — TODO confirm
                // against the engine's documented enum.
                faceSearchResDto.setGender(processInfoList.get(0).getGender().equals(1) ? "女" : "男");
            }
            return Results.newSuccessResult(faceSearchResDto);
        }
        return Results.newFailedResult(ErrorCodeEnum.FACE_DOES_NOT_MATCH);
    }

    /*
     * Raw face detection: decodes a base64 image (no data-URL prefix expected
     * here) and returns the detected face rectangles.
     */
    @RequestMapping(value = "/detectFaces", method = RequestMethod.POST)
    @ResponseBody
    public List<FaceInfo> detectFaces(String image) throws IOException {
        byte[] decode = Base64.decode(image);
        ImageInfo imageInfo;
        // try-with-resources replaces the old pattern of null-checking the
        // stream only AFTER it had already been used.
        try (InputStream inputStream = new ByteArrayInputStream(decode)) {
            imageInfo = ImageFactory.getRGBData(inputStream);
        }
        return faceEngineService.detectFaces(imageInfo);
    }

    /**
     * Strips a leading {@code data:image/...;base64,} header from a data URL,
     * returning the bare base64 payload. Returns "" for null/empty input.
     */
    private String base64Process(String base64Str) {
        if (StringUtils.isEmpty(base64Str)) {
            return "";
        }
        // Only the first (at most) 30 characters can contain the data-URL
        // header. Math.min guards short inputs, which previously threw
        // StringIndexOutOfBoundsException from substring(0, 30).
        String header = base64Str.substring(0, Math.min(30, base64Str.length())).toLowerCase();
        int indexOf = header.indexOf("base64,");
        if (indexOf > 0) {
            base64Str = base64Str.substring(indexOf + 7);
        }
        return base64Str;
    }
}
三,人脸识别:人像特征对比
人脸识别:将前端传入的图像经过人像特征提取后,和库中已存在的人像信息对比分析。
前端传入的图像需要请求后端的/faceSearch接口
下面是Controller层的FaceController类里面的/faceSearch接口
faceSearch方法主要调用了
faceEngineService.detectFaces(imageInfo);人脸检测、
faceEngineService.extractFaceFeature(imageInfo);人脸特征获取、
faceEngineService.compareFaceFeature(bytes, groupId);人脸比对,获取比对结果。
/*
 * Face search (1:N): extracts the feature of the submitted image, compares it
 * against the stored features of the given group, and — on a match — returns
 * the best candidate plus the input image annotated with a red rectangle
 * around the detected face, age and gender.
 */
@RequestMapping(value = "/faceSearch", method = RequestMethod.POST)
@ResponseBody
public Result<FaceSearchResDto> faceSearch(String file, Integer groupId) throws Exception {
    if (file == null) {
        // Previously missing: a null file reached Base64.decode and blew up.
        return Results.newFailedResult("file is null");
    }
    if (groupId == null) {
        return Results.newFailedResult("groupId is null");
    }
    byte[] decode = Base64.decode(base64Process(file));
    BufferedImage bufImage = ImageIO.read(new ByteArrayInputStream(decode));
    ImageInfo imageInfo = ImageFactory.bufferedImage2ImageInfo(bufImage);
    // Feature extraction; null means no face in the query image.
    byte[] bytes = faceEngineService.extractFaceFeature(imageInfo);
    if (bytes == null) {
        return Results.newFailedResult(ErrorCodeEnum.NO_FACE_DETECTED);
    }
    // Compare against all stored features of this group.
    List<FaceUserInfo> userFaceInfoList = faceEngineService.compareFaceFeature(bytes, groupId);
    if (CollectionUtil.isNotEmpty(userFaceInfoList)) {
        FaceUserInfo faceUserInfo = userFaceInfoList.get(0);
        FaceSearchResDto faceSearchResDto = new FaceSearchResDto();
        BeanUtil.copyProperties(faceUserInfo, faceSearchResDto);
        List<ProcessInfo> processInfoList = faceEngineService.process(imageInfo);
        if (CollectionUtil.isNotEmpty(processInfoList)) {
            // Face detection for the bounding box to draw on the response image.
            List<FaceInfo> faceInfoList = faceEngineService.detectFaces(imageInfo);
            // Guard added: process() succeeding does not guarantee
            // detectFaces() returns a non-empty list.
            if (CollectionUtil.isNotEmpty(faceInfoList)) {
                int left = faceInfoList.get(0).getRect().getLeft();
                int top = faceInfoList.get(0).getRect().getTop();
                int width = faceInfoList.get(0).getRect().getRight() - left;
                int height = faceInfoList.get(0).getRect().getBottom() - top;
                Graphics2D graphics2D = bufImage.createGraphics();
                try {
                    graphics2D.setColor(Color.RED);
                    graphics2D.setStroke(new BasicStroke(5f));
                    graphics2D.drawRect(left, top, width, height);
                } finally {
                    // Release the native graphics resources (was leaked before).
                    graphics2D.dispose();
                }
                ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
                ImageIO.write(bufImage, "jpg", outputStream);
                byte[] bytes1 = outputStream.toByteArray();
                faceSearchResDto.setImage("data:image/jpeg;base64," + Base64Utils.encodeToString(bytes1));
            }
            faceSearchResDto.setAge(processInfoList.get(0).getAge());
            // ArcSoft gender code: 1 = female, otherwise male — TODO confirm
            // against the engine's documented enum.
            faceSearchResDto.setGender(processInfoList.get(0).getGender().equals(1) ? "女" : "男");
        }
        return Results.newSuccessResult(faceSearchResDto);
    }
    return Results.newFailedResult(ErrorCodeEnum.FACE_DOES_NOT_MATCH);
}
源码地址
https://github.com/xinzhfiu/ArcSoftFaceDemo/