I've recently been studying face recognition and built a simple application with OpenCV under MFC. Training requires quite a lot of data, though, and a Windows desktop application is ultimately inconvenient for that, so I decided to restructure it as a client-server system: detection and recognition run on the server side, while video capture and display happen in the web page.
After digging through some material online, I implemented simple face detection; face recognition only needs a little extra code on top of this framework. My main reference was the following article:
http://www.open-open.com/home/space-361-do-blog-id-8960.html
Jetty version: jetty-9.2.17.v20160517
JavaCV version: 1.2
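The article doesn't show its build configuration. Assuming a Maven build, the usual coordinates for these releases would look roughly like the sketch below (JavaCV 1.2 pairs with the OpenCV 3.1.0 presets; the windows-x86_64 classifier is a placeholder for whatever platform the server actually runs on):

<!-- Hypothetical Maven dependencies for the versions above; not shown in
     the original article. The no-classifier opencv artifact holds the Java
     API, the classifier artifact holds the native libraries. -->
<dependencies>
    <dependency>
        <groupId>org.eclipse.jetty.websocket</groupId>
        <artifactId>websocket-server</artifactId>
        <version>9.2.17.v20160517</version>
    </dependency>
    <dependency>
        <groupId>org.bytedeco</groupId>
        <artifactId>javacv</artifactId>
        <version>1.2</version>
    </dependency>
    <dependency>
        <groupId>org.bytedeco.javacpp-presets</groupId>
        <artifactId>opencv</artifactId>
        <version>3.1.0-1.2</version>
    </dependency>
    <dependency>
        <groupId>org.bytedeco.javacpp-presets</groupId>
        <artifactId>opencv</artifactId>
        <version>3.1.0-1.2</version>
        <classifier>windows-x86_64</classifier>
    </dependency>
</dependencies>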
First, the HTML page. Its main jobs are:
- capturing and displaying video (HTML5, WebRTC, JavaScript);
- sending video frames to the server over a WebSocket;
- receiving and displaying the image data returned by the server (with the face detection results drawn in).
<!doctype html>
<html lang="zh-CN">
<head>
    <meta charset="UTF-8">
    <title>FaceDetect</title>
</head>
<body>

<div style="visibility:hidden; width:0; height:0;">
    <canvas id="canvas" width="320" height="240"></canvas>
</div>

<div>
    <video id="video" autoplay style="display: inline;"></video>
    <img id="target" style="display:inline;"/>
</div>

<script type="text/javascript">

var ws = new WebSocket("ws://127.0.0.1:2014/");
ws.binaryType = "arraybuffer";

ws.onopen = function() {
    ws.send("I'm client");
};

// The server sends back the annotated frame as raw PNG bytes;
// convert them to a base64 data URI and show them in the <img>.
ws.onmessage = function (evt) {
    var bytes = new Uint8Array(evt.data);
    var data = "";
    var len = bytes.byteLength;
    for (var i = 0; i < len; ++i) {
        data += String.fromCharCode(bytes[i]);
    }
    var img = document.getElementById("target");
    img.src = "data:image/png;base64," + window.btoa(data);
};

ws.onclose = function() {
    alert("Closed");
};

ws.onerror = function(err) {
    alert("Error: " + err);
};

var getUserMedia = (navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia);

var video = document.getElementById('video');
var canvas = document.getElementById('canvas');
var ctx = canvas.getContext('2d');

getUserMedia.call(navigator, {
    video: true,
    audio: true
}, function(localMediaStream) {
    video.src = window.URL.createObjectURL(localMediaStream);
    video.onloadedmetadata = function(e) {
        console.log("Label: " + localMediaStream.label);
        console.log("AudioTracks", localMediaStream.getAudioTracks());
        console.log("VideoTracks", localMediaStream.getVideoTracks());
    };
}, function(e) {
    console.log('Reeeejected!', e);
});

function dataURItoBlob(dataURI) {
    // convert base64/URLEncoded data component to raw binary data held in a string
    var byteString;
    if (dataURI.split(',')[0].indexOf('base64') >= 0)
        byteString = atob(dataURI.split(',')[1]);
    else
        byteString = unescape(dataURI.split(',')[1]);

    // separate out the mime component
    var mimeString = dataURI.split(',')[0].split(':')[1].split(';')[0];

    // write the bytes of the string to a typed array
    var ia = new Uint8Array(byteString.length);
    for (var i = 0; i < byteString.length; i++) {
        ia[i] = byteString.charCodeAt(i);
    }

    return new Blob([ia], {type: mimeString});
}

// Grab a 320x240 frame from the hidden canvas every 250 ms (~4 fps)
// and push it to the server as a JPEG blob.
var timer = setInterval(function () {
    ctx.drawImage(video, 0, 0, 320, 240);
    var data = canvas.toDataURL('image/jpeg', 1.0);
    var newblob = dataURItoBlob(data);
    ws.send(newblob);
}, 250);
</script>
</body>

</html>
facedetect.html
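A note for anyone trying this today: the prefixed navigator.getUserMedia and URL.createObjectURL(stream) calls above were the standard pattern in 2016 but have since been deprecated. A minimal sketch of the modern equivalent, assuming the same <video id="video"> element (current browsers also require a secure context, i.e. https:// or localhost):

// Promise-based replacement for the prefixed getUserMedia calls above.
navigator.mediaDevices.getUserMedia({ video: true, audio: false })
    .then(function (stream) {
        // srcObject replaces window.URL.createObjectURL(stream)
        document.getElementById('video').srcObject = stream;
    })
    .catch(function (err) {
        console.log('getUserMedia failed:', err);
    });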
Next, the server-side code (Jetty, WebSocket, JavaCV). Its main jobs are:
- receiving the video frame data sent by the client;
- running face detection with JavaCV;
- drawing the detection results onto the original image and sending the new image back to the client.
package com.husthzy.face;

import org.eclipse.jetty.server.Server;

public class WebsocketServer extends Thread {
    @Override
    public void run() {
        super.run();

        try {
            Server server = new Server(2014);
            server.setHandler(new FaceDetectionHandler());
            server.setStopTimeout(0);
            server.start();
            server.join();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    public static void main(String[] args) {
        WebsocketServer mWebSocketServer = new WebsocketServer();
        mWebSocketServer.start();
    }
}
WebsocketServer.java
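A small design note: server.start() is non-blocking (Jetty manages its own thread pool) and server.join() simply blocks until the server stops, so the wrapper Thread above isn't strictly necessary. An equivalent minimal entry point would be:

// Minimal sketch without the Thread wrapper: start() spawns Jetty's own
// threads, join() parks the main thread until the server shuts down.
public static void main(String[] args) throws Exception {
    Server server = new Server(2014);    // port matches ws://127.0.0.1:2014/ in the HTML
    server.setHandler(new FaceDetectionHandler());
    server.start();
    server.join();
}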
package com.husthzy.face;

import static org.bytedeco.javacpp.opencv_core.CV_8UC1;
import static org.bytedeco.javacpp.opencv_imgcodecs.cvDecodeImage;
import static org.bytedeco.javacpp.opencv_imgproc.COLOR_BGR2GRAY;
import static org.bytedeco.javacpp.opencv_imgproc.cvtColor;
import static org.bytedeco.javacpp.opencv_imgproc.equalizeHist;
import static org.bytedeco.javacpp.opencv_imgproc.rectangle;

import java.awt.image.BufferedImage;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;

import javax.imageio.ImageIO;

import org.bytedeco.javacpp.BytePointer;
import org.bytedeco.javacpp.opencv_core;
import org.bytedeco.javacpp.opencv_core.IplImage;
import org.bytedeco.javacpp.opencv_core.Mat;
import org.bytedeco.javacpp.opencv_core.Rect;
import org.bytedeco.javacpp.opencv_core.RectVector;
import org.bytedeco.javacpp.opencv_core.Scalar;
import org.bytedeco.javacpp.opencv_objdetect.CascadeClassifier;
import org.bytedeco.javacv.Frame;
import org.bytedeco.javacv.Java2DFrameConverter;
import org.bytedeco.javacv.OpenCVFrameConverter.ToMat;
import org.eclipse.jetty.websocket.api.Session;
import org.eclipse.jetty.websocket.api.annotations.OnWebSocketClose;
import org.eclipse.jetty.websocket.api.annotations.OnWebSocketConnect;
import org.eclipse.jetty.websocket.api.annotations.OnWebSocketError;
import org.eclipse.jetty.websocket.api.annotations.OnWebSocketMessage;
import org.eclipse.jetty.websocket.api.annotations.WebSocket;
import org.eclipse.jetty.websocket.server.WebSocketHandler;
import org.eclipse.jetty.websocket.servlet.WebSocketServletFactory;

@WebSocket
public class FaceDetectionHandler extends WebSocketHandler {

    // Haar cascade shipped with OpenCV; must be in the server's working
    // directory (or change this to an absolute path).
    private static final String CASCADE_FILE = "haarcascade_frontalface_alt.xml";

    private Session mSession;
    private static ArrayList<FaceDetectionHandler> sessions = new ArrayList<FaceDetectionHandler>();

    private CascadeClassifier face_cascade = new CascadeClassifier(CASCADE_FILE);

    public static ArrayList<FaceDetectionHandler> getAllSessions() {
        return sessions;
    }

    @Override
    public void configure(WebSocketServletFactory factory) {
        factory.register(FaceDetectionHandler.class);
        // Raise the limit so a full JPEG frame fits in one binary message.
        factory.getPolicy().setMaxBinaryMessageSize(1024 * 512);
    }

    @OnWebSocketClose
    public void onClose(int statusCode, String reason) {
        sessions.remove(this);
        System.out.println(
                "Close: statusCode = " + statusCode + ", reason = " + reason + ", sessions = " + sessions.size());
    }

    @OnWebSocketError
    public void onError(Throwable t) {
        System.out.println("Error: " + t.getMessage());
    }

    @OnWebSocketConnect
    public void onConnect(Session session) {
        mSession = session;
        sessions.add(this);

        System.out.println("Connect: " + session.getRemoteAddress().getAddress());
    }

    @OnWebSocketMessage
    public void onMessage(String message) {
        System.out.println("Message: " + message);
    }

    @OnWebSocketMessage
    public void onBinaryMessage(byte data[], int offset, int length) {
        System.out.println("Binary Message len:" + length);
        // Skip obviously truncated frames.
        if (length > 10000) {
            try {
                byte[] sdata = process(data);
                ByteBuffer byteBuffer = ByteBuffer.wrap(sdata);
                mSession.getRemote().sendBytes(byteBuffer);
                byteBuffer.clear();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }

    public byte[] process(byte data[]) {
        // Decode the JPEG bytes sent by the browser (old C API; returns BGR).
        IplImage originalImage = cvDecodeImage(opencv_core.cvMat(1, data.length, CV_8UC1, new BytePointer(data)));

        Mat videoMat = new Mat(originalImage);
        Mat videoMatGray = new Mat();
        // Convert the current frame to grayscale (the JPEG decodes to 3-channel BGR):
        cvtColor(videoMat, videoMatGray, COLOR_BGR2GRAY);
        equalizeHist(videoMatGray, videoMatGray);

        RectVector faces = new RectVector();
        face_cascade.detectMultiScale(videoMatGray, faces);
        for (int i = 0; i < faces.size(); i++) {
            Rect face_i = faces.get(i);

            // If a recognizer (e.g. Fisherfaces/LBPH) is plugged in later, the face
            // region would be cropped, resized, and predicted here:
            // Mat face = new Mat(videoMatGray, face_i);
            // resize(face, face_resized, new Size(im_width, im_height), 1.0, 1.0, INTER_CUBIC);
            // int prediction = lbphFaceRecognizer.predict(face);

            // Draw a green rectangle around the detected face:
            rectangle(videoMat, face_i, new Scalar(0, 255, 0, 1));

            System.out.println("face pos: x:" + face_i.x() + " y:" + face_i.y());

            // The prediction could be annotated next to the box
            // (clamp the text position so it stays inside the image):
            // String box_text = "Prediction = " + prediction;
            // int pos_x = Math.max(face_i.tl().x() - 10, 0);
            // int pos_y = Math.max(face_i.tl().y() - 10, 0);
            // putText(videoMat, box_text, new Point(pos_x, pos_y), FONT_HERSHEY_PLAIN, 1.0, new Scalar(0, 255, 0, 2.0));
        }

        // JavaCVUtil.imShow(videoMat, "test");

        return getMatByteBuffer(videoMat);
    }

    private byte[] getMatByteBuffer(Mat m) {
        // Mat -> Frame -> BufferedImage -> PNG bytes.
        byte[] result = null;
        try {
            ToMat convert = new ToMat();
            Frame frame = convert.convert(m);
            Java2DFrameConverter java2dFrameConverter = new Java2DFrameConverter();
            BufferedImage bufferedImage = java2dFrameConverter.convert(frame);
            ByteArrayOutputStream out = new ByteArrayOutputStream();
            ImageIO.write(bufferedImage, "png", out);
            result = out.toByteArray();
            out.close();
        } catch (IOException exception) {
            exception.printStackTrace();
        }
        return result;
    }
}
FaceDetectionHandler.java
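Incidentally, the Mat -> Frame -> BufferedImage -> ImageIO round-trip in getMatByteBuffer() works but is fairly heavyweight; OpenCV can encode the Mat directly. A sketch using opencv_imgcodecs.imencode (untested against this exact setup, but it follows the usual JavaCPP pattern for std::vector output buffers):

import static org.bytedeco.javacpp.opencv_imgcodecs.imencode;

import org.bytedeco.javacpp.BytePointer;
import org.bytedeco.javacpp.opencv_core.Mat;

public class MatEncoder {
    // Encode a Mat straight to PNG bytes, skipping the Java2D detour.
    public static byte[] toPng(Mat m) {
        BytePointer buf = new BytePointer();
        imencode(".png", m, buf);                 // resizes buf to the encoded size
        byte[] bytes = new byte[(int) buf.limit()];
        buf.get(bytes);
        buf.deallocate();                         // release the native buffer
        return bytes;
    }
}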