Created on 2013-8-7
URL : http://blog.sina.com.cn/s/blog_a502f1a30101mfj4.html
@author: zhxfl
Please credit the source when reposting.
1. Extract frames from a video and save them as images
#include <iostream>
#include <stdio.h>
#include "opencv2/highgui/highgui.hpp"

using namespace std;
using namespace cv;

bool VedioToImage(char *from, char *to)
{
    CvCapture *capture = cvCreateFileCapture(from);
    if (!capture) return false;

    IplImage *frame = 0;
    for (int i = 0, j = 0; ; j++)
    {
        frame = cvQueryFrame(capture);
        if (frame)
        {
            if (j % 5 == 0)   // keep every 5th frame
            {
                char str[1024];
                sprintf(str, "%s/%d.png", to, i);
                cvSaveImage(str, frame);
                printf("%d.png\n", i++);
            }
        }
        else
        {
            break;
        }
    }
    cvReleaseCapture(&capture);
    return true;
}

int main()
{
    VedioToImage("../fly.mp4", "../fly");
    return 0;
}
GINC = $(shell pkg-config --cflags opencv)
GLIB = $(shell pkg-config --libs opencv)

all: build

build: a.out

a.o: a.cpp
	g++ $(GINC) -c a.cpp

a.out: a.o
	g++ -o a.out a.o $(GLIB)

clean:
	rm *.o

run: build
	./a.out
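The program above uses the legacy C API (CvCapture, IplImage, cvSaveImage). For comparison, here is a minimal sketch of the same every-fifth-frame extraction with the C++ cv::VideoCapture / cv::imwrite interface; videoToImages and the paths are illustrative, not part of the original code:

// Sketch: frame extraction with the C++ VideoCapture interface instead of
// the legacy CvCapture/IplImage API. Paths are placeholders.
#include <cstdio>
#include "opencv2/highgui/highgui.hpp"
using namespace cv;

bool videoToImages(const char *from, const char *to)
{
    VideoCapture capture(from);
    if (!capture.isOpened()) return false;

    Mat frame;
    for (int i = 0, j = 0; capture.read(frame); j++)
    {
        if (j % 5 == 0)                      // keep every 5th frame, as above
        {
            char name[1024];
            sprintf(name, "%s/%d.png", to, i++);
            imwrite(name, frame);            // save the frame as a PNG
        }
    }
    return true;
}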
2. Process a series of images and discard those in which no face is detected
#include <iostream>
#include <iomanip>
#include "opencv2/core/core.hpp"
#include "opencv2/contrib/contrib.hpp"
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/gpu/gpu.hpp"
#include <stdio.h>
#include <time.h>

using namespace std;
using namespace cv;
using namespace cv::gpu;

std::string getWorkspace()
{
#if (defined _WIN32 || defined _WIN64) && defined _MSC_VER
    std::string workspace = std::string("E:/Resources");
#else
    std::string workspace = std::string("/data/ubuntu/Resources");
#endif
    return workspace;
}

string path = getWorkspace() + "/haarcascade_frontalface_alt.xml";
CascadeClassifier cascade_cpu; // classifier running on the CPU
CascadeClassifier cascade_gpu; // "GPU" classifier (still the CPU CascadeClassifier class in this version)

int detectFaceCpu(std::string file)
{
    string input = getWorkspace() + string("/image/") + file;
    Mat frame_cpu = cv::imread(input.c_str());
    Mat gray_cpu;
    // convert to grayscale
    if (frame_cpu.channels() == 3)
    {
        cvtColor(frame_cpu, gray_cpu, CV_BGR2GRAY);
    }
    else
    {
        gray_cpu = frame_cpu;
    }

    vector<Rect> facesBuf_cpu;
    cascade_cpu.detectMultiScale(gray_cpu, facesBuf_cpu);
    for (size_t i = 0; i < facesBuf_cpu.size(); i++)
    {
        rectangle(frame_cpu, facesBuf_cpu[i], Scalar(255));
    }

    if (facesBuf_cpu.size())  // only keep images with at least one detected face
    {
        string path = getWorkspace() + string("/image_output/") + file;
        IplImage tmp(frame_cpu);
        cvSaveImage(path.c_str(), &tmp);
    }
    return true;
}

int detectFaceGpu(std::string file)
{
    string input = getWorkspace() + string("/image/") + file;
    Mat frame_gpu = cv::imread(input.c_str());
    Mat gray_gpu;
    // convert to grayscale
    if (frame_gpu.channels() == 3)
    {
        cvtColor(frame_gpu, gray_gpu, CV_BGR2GRAY);
    }
    else
    {
        gray_gpu = frame_gpu;
    }

    vector<Rect> facesBuf_gpu;
    cascade_gpu.detectMultiScale(gray_gpu, facesBuf_gpu);
    for (size_t i = 0; i < facesBuf_gpu.size(); i++)
    {
        rectangle(frame_gpu, facesBuf_gpu[i], Scalar(255));
    }

    if (facesBuf_gpu.size())  // only keep images with at least one detected face
    {
        string path = getWorkspace() + string("/image_output/") + file;
        IplImage tmp(frame_gpu);
        cvSaveImage(path.c_str(), &tmp);
    }
    return true;
}

int main(int argc, char* argv[])
{
    if (!cascade_cpu.load(path))
    {
        printf("error cpu\n");
        return 0;
    }
    if (!cascade_gpu.load(path))
    {
        printf("error gpu\n");
        return 0;
    }
    for (int i = 180; i < 569; i++)
    {
        char str[125];
        sprintf(str, "%03d.tif", i);
        clock_t start = clock();
        detectFaceGpu(string(str));
        clock_t end = clock();
        printf("%03d.tif time = %f\n", i, (float)(end - start) / CLOCKS_PER_SEC);
    }
    return 0;
}
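In this version both cascade_cpu and cascade_gpu are declared as plain cv::CascadeClassifier objects, so detectFaceGpu still runs on the CPU; the actual GPU class, cv::gpu::CascadeClassifier_GPU, only appears in parts 4 and 5. A minimal sketch of that GPU detection step, assuming the OpenCV 2.x gpu module and a cascade that has already loaded haarcascade_frontalface_alt.xml (detectOnGpu is an illustrative helper, not part of the original code):

// Sketch: face detection with the gpu module, as used later in parts 4 and 5.
// Assumes cascade_gpu has loaded the Haar cascade and gray is a single-channel CV_8U Mat.
#include <vector>
#include "opencv2/gpu/gpu.hpp"
using namespace cv;

int detectOnGpu(gpu::CascadeClassifier_GPU &cascade_gpu, const Mat &gray, std::vector<Rect> &faces)
{
    gpu::GpuMat gray_gpu(gray);          // upload the grayscale image to the device
    gpu::GpuMat facesBuf_gpu;
    int n = cascade_gpu.detectMultiScale(gray_gpu, facesBuf_gpu, 1.2, 4);

    if (n > 0)
    {
        Mat downloaded;
        facesBuf_gpu.colRange(0, n).download(downloaded);  // copy the rectangles back to the host
        faces.assign(downloaded.ptr<Rect>(), downloaded.ptr<Rect>() + n);
    }
    return n;
}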
3. Record video from a camera
#include <iostream>
#include <iomanip>
#include "opencv2/core/core.hpp"
#include "opencv2/contrib/contrib.hpp"
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/gpu/gpu.hpp"
#include <stdio.h>
#include <time.h>

using namespace std;
using namespace cv;
using namespace cv::gpu;

std::string getWorkspace()
{
#if (defined _WIN32 || defined _WIN64) && defined _MSC_VER
    std::string workspace = std::string("E:/Resources");
#else
    std::string workspace = std::string("/data/ubuntu/Resources");
#endif
    return workspace;
}

int main(int argc, char* argv[])
{
    for (int k = 16; k < 100; k++)
    {
        char str[125];
        sprintf(str, "/%d.avi", k);
        string path = getWorkspace() + string(str);

        // requires the Xvid encoder to be installed
        CvVideoWriter *writer = cvCreateVideoWriter(path.c_str(), CV_FOURCC('X','V','I','D'), 20, cvSize(640, 480));

        CvCapture *capture1 = cvCaptureFromCAM(0);
        int w = 640, h = 480;
        cvSetCaptureProperty(capture1, CV_CAP_PROP_FRAME_WIDTH, w);
        cvSetCaptureProperty(capture1, CV_CAP_PROP_FRAME_HEIGHT, h);

        cvNamedWindow("Camera_1", CV_WINDOW_AUTOSIZE);
        IplImage *frame1;
        int i = 0;
        while (1)
        {
            frame1 = cvQueryFrame(capture1);
            if (!frame1) break;

            cvShowImage("Camera_1", frame1);
            cvWriteFrame(writer, frame1);   // write the frame to the file

            int key = cvWaitKey(10);
            if (key == 27) break;           // ESC stops recording
            if (i >= 500) break;            // record at most 500 frames per file
            else i++;
            printf("%d\n", i);
        }
        cvReleaseVideoWriter(&writer);
        cvReleaseCapture(&capture1);
        cvDestroyWindow("Camera_1");
    }
    return 0;
}
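The same recording loop can also be written with the C++ cv::VideoCapture / cv::VideoWriter classes. A minimal sketch, assuming a single output file ("out.avi" is a placeholder) and the same 640x480, 20 fps, Xvid settings:

// Sketch: recording up to 500 frames from the default camera with the C++ API.
#include "opencv2/highgui/highgui.hpp"
using namespace cv;

int main()
{
    VideoCapture cam(0);
    if (!cam.isOpened()) return -1;
    cam.set(CV_CAP_PROP_FRAME_WIDTH, 640);
    cam.set(CV_CAP_PROP_FRAME_HEIGHT, 480);

    VideoWriter writer("out.avi", CV_FOURCC('X','V','I','D'), 20, Size(640, 480));

    Mat frame;
    for (int i = 0; i < 500 && cam.read(frame); i++)
    {
        imshow("Camera_1", frame);     // preview window
        writer << frame;               // append the frame to the file
        if (waitKey(10) == 27) break;  // ESC stops early
    }
    return 0;
}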
4. Split a video stream into two parts: frames where a face is detected and frames where none is
#include <iostream>
#include <iomanip>
#include "opencv2/core/core.hpp"
#include "opencv2/contrib/contrib.hpp"
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/gpu/gpu.hpp"
#include <stdio.h>
#include <time.h>

using namespace std;
using namespace cv;
using namespace cv::gpu;

std::string getWorkspace()
{
#if (defined _WIN32 || defined _WIN64) && defined _MSC_VER
    std::string workspace = std::string("E:/Resources");
#else
    std::string workspace = std::string("/data/hadoop/Resources");
#endif
    return workspace;
}

CascadeClassifier cascade_cpu;       // CPU classifier
CascadeClassifier_GPU cascade_gpu;   // GPU classifier

bool detectFaceCpu(Mat &frame_cpu)
{
    Mat gray_cpu;
    // convert to grayscale
    if (frame_cpu.channels() == 3)
    {
        cvtColor(frame_cpu, gray_cpu, CV_BGR2GRAY);
    }
    else
    {
        gray_cpu = frame_cpu;
    }

    vector<Rect> facesBuf_cpu;
    cascade_cpu.detectMultiScale(gray_cpu, facesBuf_cpu, 1.2, 4);
    for (size_t i = 0; i < facesBuf_cpu.size(); i++)
    {
        rectangle(frame_cpu, facesBuf_cpu[i], Scalar(255));
    }
    return facesBuf_cpu.size() > 0;
}

int detectFaceGpu(Mat &frame)
{
    GpuMat frame_gpu(frame);
    GpuMat gray_gpu;
    // convert to grayscale
    if (frame_gpu.channels() == 3)
    {
        cvtColor(frame_gpu, gray_gpu, CV_BGR2GRAY);
    }
    else
    {
        gray_gpu = frame_gpu;
    }

    GpuMat facesBuf_gpu;
    Mat faces_downloaded;
    cascade_gpu.findLargestObject = true;
    int detections_num = cascade_gpu.detectMultiScale(gray_gpu, facesBuf_gpu, 1.2, 4);
    facesBuf_gpu.colRange(0, detections_num).download(faces_downloaded);
    for (int i = 0; i < detections_num; ++i)
    {
        rectangle(frame, faces_downloaded.ptr<cv::Rect>()[i], Scalar(255));
    }
    return detections_num > 0;
}

void decompound_cpu(string in, string out_1, string out_2)
{
    // open the input video
    CvCapture *capture = cvCreateFileCapture(in.c_str());
    int w = 640, h = 480;
    cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, w);
    cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, h);

    CvVideoWriter *writer1 = cvCreateVideoWriter(out_1.c_str(), CV_FOURCC('X','V','I','D'), 20, cvSize(w, h));
    CvVideoWriter *writer2 = cvCreateVideoWriter(out_2.c_str(), CV_FOURCC('X','V','I','D'), 20, cvSize(w, h));

    IplImage *frame;
    cvNamedWindow("Camera_1", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("Camera_2", CV_WINDOW_AUTOSIZE);

    static int i = 0;
    while (1)
    {
        frame = cvQueryFrame(capture);
        if (!frame)
        {
            printf("finish !!!\n");
            break;
        }
        Mat mat(frame);
        if (detectFaceGpu(mat))
        {
            cvShowImage("Camera_1", frame);
            printf("y = %d\n", i++);
            cvWriteFrame(writer1, frame);  // frames with a face
        }
        else
        {
            printf("n = %d\n", i++);
            cvShowImage("Camera_2", frame);
            cvWriteFrame(writer2, frame);  // frames without a face
        }
        int key = cvWaitKey(2);
        if (key == 27) break;
    }
    cvReleaseVideoWriter(&writer1);
    cvReleaseVideoWriter(&writer2);
    cvReleaseCapture(&capture);
    cvDestroyWindow("Camera_1");
    cvDestroyWindow("Camera_2");
}

bool init()
{
    string path = getWorkspace() + "/haarcascade_frontalface_alt.xml";
    if (!cascade_cpu.load(path))
    {
        printf("error cascade_cpu load\n");
        return false;
    }
    if (!cascade_gpu.load(path))
    {
        printf("error cascade_gpu load\n");
        return false;
    }
    return true;
}

int main(int argc, char* argv[])
{
    if (!init()) return -1;

    string x1 = getWorkspace() + string("/4.avi");
    string x2 = getWorkspace() + string("/image_output/4_1.avi");
    string x3 = getWorkspace() + string("/image_output/4_2.avi");
    decompound_cpu(x1, x2, x3);
    return 0;
}
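Because this version relies on cv::gpu::CascadeClassifier_GPU, it can help to verify that a CUDA-capable device is available before calling init(); a minimal sketch using gpu::getCudaEnabledDeviceCount() from the gpu module (haveGpu is an illustrative helper, not part of the original code):

// Sketch: defensive CUDA check; returns false on machines without a usable device,
// in which case detectFaceCpu can be used instead of detectFaceGpu.
#include <cstdio>
#include "opencv2/gpu/gpu.hpp"

bool haveGpu()
{
    int n = cv::gpu::getCudaEnabledDeviceCount();  // 0 if no device or OpenCV built without CUDA
    if (n == 0)
        printf("no CUDA device found, falling back to detectFaceCpu\n");
    return n > 0;
}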
5. Extract a specific person's footage from a video stream
#include <iostream>
#include <iomanip>
#include "opencv2/core/core.hpp"
#include "opencv2/contrib/contrib.hpp"
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/gpu/gpu.hpp"
#include <stdio.h>
#include <time.h>
#include "personName.h"
#include "util.h"

using namespace std;
using namespace cv;
using namespace cv::gpu;

CascadeClassifier cascade_cpu;       // CPU classifier
CascadeClassifier_GPU cascade_gpu;   // GPU classifier

int predict_id = -10;        // label of the person to extract
float s_confidence = 2000;   // confidence threshold for accepting a prediction

IplImage *getFace(IplImage *img, CvRect rect)
{
    // crop the detected face and resize it to 80x80 for the recognizer
    cvSetImageROI(img, rect);
    IplImage *tmp = cvCreateImage(cvSize(80, 80), IPL_DEPTH_8U, 1);
    cvResize(img, tmp, CV_INTER_LINEAR);
    cvResetImageROI(img);
    return tmp;
}

int predict(Mat &mat)
{
    static bool init = true;
    static Ptr<FaceRecognizer> model;
    if (init)
    {
        model = createEigenFaceRecognizer(200);
        string str = getWorkspace() + string("/face.xml");
        model->load(str.c_str());
        init = false;
    }

    int predictedLabel = -1;
    double confidence = 0.0;
    model->predict(mat, predictedLabel, confidence);

    string result_message = format("%10s confidence = %lf",
        PersonName::build()->getName(predictedLabel).c_str(), confidence);
    printf("%s\n", result_message.c_str());

    if (confidence < s_confidence)
        return predictedLabel;
    else
        return -1;
}

bool detectFaceCpu(Mat &frame_cpu)
{
    Mat gray_cpu;
    // convert to grayscale
    if (frame_cpu.channels() == 3)
    {
        cvtColor(frame_cpu, gray_cpu, CV_BGR2GRAY);
    }
    else
    {
        gray_cpu = frame_cpu;
    }

    vector<Rect> facesBuf_cpu;
    cascade_cpu.detectMultiScale(gray_cpu, facesBuf_cpu, 1.2, 4);

    IplImage img(gray_cpu);
    for (size_t i = 0; i < facesBuf_cpu.size(); i++)
    {
        rectangle(frame_cpu, facesBuf_cpu[i], Scalar(255));
        Mat tmp(getFace(&img, facesBuf_cpu[i]));
        int id = predict(tmp);
        if (id != predict_id) continue;
        return true;
    }
    return false;
}

int detectFaceGpu(Mat &frame)
{
    GpuMat frame_gpu(frame);
    GpuMat gray_gpu;
    Mat gray_cpu;
    // convert to grayscale (GPU copy for detection, CPU copy for recognition)
    if (frame_gpu.channels() == 3)
    {
        cvtColor(frame_gpu, gray_gpu, CV_BGR2GRAY);
        cvtColor(frame, gray_cpu, CV_BGR2GRAY);
    }
    else
    {
        gray_gpu = frame_gpu;
        gray_cpu = frame;
    }

    GpuMat facesBuf_gpu;
    Mat faces_downloaded;
    cascade_gpu.findLargestObject = true;
    int detections_num = cascade_gpu.detectMultiScale(gray_gpu, facesBuf_gpu, 1.2, 4);
    facesBuf_gpu.colRange(0, detections_num).download(faces_downloaded);

    IplImage img(gray_cpu);
    for (int i = 0; i < detections_num; ++i)
    {
        rectangle(frame, faces_downloaded.ptr<cv::Rect>()[i], Scalar(255));
        Mat tmp(getFace(&img, faces_downloaded.ptr<cv::Rect>()[i]));
        int id = predict(tmp);
        if (id != predict_id) continue;
        return true;
    }
    return false;
}

void decompound_cpu(string in, string out_1, string out_2)
{
    // open the input video
    CvCapture *capture = cvCreateFileCapture(in.c_str());
    int w = 640, h = 480;
    cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, w);
    cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, h);

    CvVideoWriter *writer1 = cvCreateVideoWriter(out_1.c_str(), CV_FOURCC('X','V','I','D'), 20, cvSize(w, h));
    CvVideoWriter *writer2 = cvCreateVideoWriter(out_2.c_str(), CV_FOURCC('X','V','I','D'), 20, cvSize(w, h));

    IplImage *frame;
    cvNamedWindow("Camera_1", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("Camera_2", CV_WINDOW_AUTOSIZE);

    static int i = 0;
    while (1)
    {
        frame = cvQueryFrame(capture);
        if (!frame)
        {
            printf("finish !!!\n");
            break;
        }
        Mat mat(frame);
        if (detectFaceGpu(mat))
        {
            cvShowImage("Camera_1", frame);
            printf("y = %d\n", i++);
            cvWriteFrame(writer1, frame);  // frames containing the target person
        }
        else
        {
            printf("n = %d\n", i++);
            cvShowImage("Camera_2", frame);
            cvWriteFrame(writer2, frame);  // everything else
        }
        int key = cvWaitKey(2);
        if (key == 27) break;
    }
    cvReleaseVideoWriter(&writer1);
    cvReleaseVideoWriter(&writer2);
    cvReleaseCapture(&capture);
    cvDestroyWindow("Camera_1");
    cvDestroyWindow("Camera_2");
}

bool init()
{
    predict_id = 1;        // extract the person whose label is 1
    s_confidence = 2000;
    string path = getWorkspace() + "/haarcascade_frontalface_alt.xml";
    if (!cascade_cpu.load(path))
    {
        printf("error cascade_cpu load\n");
        return false;
    }
    if (!cascade_gpu.load(path))
    {
        printf("error cascade_gpu load\n");
        return false;
    }
    return true;
}

int main(int argc, char* argv[])
{
    if (!init()) return -1;

    string x1 = getWorkspace() + string("/17.avi");
    string x2 = getWorkspace() + string("/image_output/17_1.avi");
    string x3 = getWorkspace() + string("/image_output/17_2.avi");
    decompound_cpu(x1, x2, x3);
    return 0;
}