I am using OpenCV 2.4.9. After installation, my cvhaartraining.cpp file is located at ........\opencv\sources\apps\haartraining\cvhaartraining.cpp. Studying this source code for many days has taught me a great deal; the OpenCV library really is powerful. I have added some of my own annotations to the code in this post; for related background please see my blog at http://blog.csdn.net/ding977921830?viewmode=contents. The details are as follows:
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
/*
* cvhaartraining.cpp
*
* training of cascade of boosted classifiers based on haar features
* References:
*1 http://www.sjsjw.com/kf_www/article/000119ABA007840.asp
*2 http://www.opencvchina.com/thread-129-1-1.html
*/
#include "cvhaartraining.h"
#include "_cvhaartraining.h"
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <climits>
#include <ctype.h>
#include "highgui.h"
#ifdef CV_VERBOSE
#include <ctime>
#ifdef _WIN32
/* use clock() function instead of time() */
#define TIME( arg ) (((double) clock()) / CLOCKS_PER_SEC)
#else
#define TIME( arg ) (time( arg ))
#endif /* _WIN32 */
#endif /* CV_VERBOSE */
#if defined CV_OPENMP && (defined _MSC_VER || defined CV_ICC)
#define CV_OPENMP 1
#else
#undef CV_OPENMP
#endif
typedef struct CvBackgroundData
{
int count;
char** filename;
int last;
int round;
CvSize winsize;
} CvBackgroundData;
typedef struct CvBackgroundReader
{
CvMat src;
CvMat img;
CvPoint offset;
float scale;
float scalefactor;
float stepfactor;
CvPoint point;
} CvBackgroundReader;
/*
* Background reader
* Created in each thread
*/
CvBackgroundReader* cvbgreader = NULL;
#if defined CV_OPENMP
#pragma omp threadprivate(cvbgreader)
#endif
CvBackgroundData* cvbgdata = NULL;
/*
* get sum image offsets for <rect> corner points
* step - row step (measured in image pixels!) of sum image
*/
// CV_SUM_OFFSETS( p0, p1, p2, p3, rect, step ) computes the integral-image offsets of the four corners of an upright rectangle, using the top-left corner (x, y) as the reference point
#define CV_SUM_OFFSETS( p0, p1, p2, p3, rect, step ) \
/* (x, y) */ \
(p0) = (rect).x + (step) * (rect).y; \
/* (x + w, y) */ \
(p1) = (rect).x + (rect).width + (step) * (rect).y; \
/* (x, y + h) */ \
(p2) = (rect).x + (step) * ((rect).y + (rect).height); \
/* (x + w, y + h) */ \
(p3) = (rect).x + (rect).width + (step) * ((rect).y + (rect).height);
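/*
 * Illustrative example: for a 24x24 window the sum image is 25 elements wide, so step = 25.
 * With rect = {x = 2, y = 3, width = 4, height = 5} the macro yields
 *   p0 = 2 + 25*3 = 77     (top-left)
 *   p1 = 6 + 25*3 = 81     (top-right)
 *   p2 = 2 + 25*8 = 202    (bottom-left)
 *   p3 = 6 + 25*8 = 206    (bottom-right)
 * and the pixel sum inside the rectangle can then be read from the integral image as
 *   sum[p0] - sum[p1] - sum[p2] + sum[p3]
 * i.e. four lookups instead of width*height additions.
 */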
/*
* get tilted image offsets for <rect> corner points
* step - row step (measured in image pixels!) of tilted image
*/
// CV_TILTED_OFFSETS( p0, p1, p2, p3, rect, step ) computes the tilted-integral-image offsets of the four corners of a 45-degree rotated rectangle, using the topmost corner (x, y) as the reference point
#define CV_TILTED_OFFSETS( p0, p1, p2, p3, rect, step ) \
/* (x, y) */ \
(p0) = (rect).x + (step) * (rect).y; \
/* (x - h, y + h) */ \
(p1) = (rect).x - (rect).height + (step) * ((rect).y + (rect).height); \
/* (x + w, y + w) */ \
(p2) = (rect).x + (rect).width + (step) * ((rect).y + (rect).width); \
/* (x + w - h, y + w + h) */ \
(p3) = (rect).x + (rect).width - (rect).height \
+ (step) * ((rect).y + (rect).width + (rect).height);
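/*
 * Illustrative note: these offsets play the same role for the 45-degree rotated integral image
 * (data->tilted) that CV_SUM_OFFSETS plays for the upright one; the sum of a rotated rectangle
 * is again obtained with four lookups, roughly
 *   tilted[p0] - tilted[p1] - tilted[p2] + tilted[p3],
 * only the corner coordinates are expressed in the rotated frame described by the per-line
 * comments above. The exact sign convention is the one used by cvEvalFastHaarFeature in
 * _cvhaartraining.h.
 */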
/*
* icvCreateIntHaarFeatures
*
* Create internal representation of haar features
*
* mode:
* 0 - BASIC = Viola
* 1 - CORE = All upright
* 2 - ALL = All features
*/
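/*
 * Illustrative usage, as a sketch only: for the default 24x24 training window a caller
 * would do something like
 *
 *   CvIntHaarFeatures* features = icvCreateIntHaarFeatures( cvSize( 24, 24 ), 0, 1 );
 *   ...
 *   icvReleaseIntHaarFeatures( &features );
 *
 * where mode = 0 selects the BASIC feature set and symmetric = 1 assumes a left-right
 * symmetric object such as a frontal face. The real call sites are further down in this file.
 */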
// Purpose: for a window of size winsize, enumerate the rectangles of every Haar feature, store them in a CvIntHaarFeatures structure and return it.
static
CvIntHaarFeatures* icvCreateIntHaarFeatures( CvSize winsize,
int mode,
/*
mode = 0 (BASIC): only the upright features haar_x2, haar_y2, haar_x3, haar_y3 and haar_x2_y2 are generated.
mode = 1 (CORE):  additionally haar_x4, haar_y4 and haar_point are generated.
mode = 2 (ALL):   all of the above plus the tilted features tilted_haar_x2, tilted_haar_y2,
tilted_haar_x3, tilted_haar_y3, tilted_haar_x4 and tilted_haar_y4. */
int symmetric )/* whether the target object is vertically (left-right) symmetric. With symmetric = 1 only the
Haar features whose centre lies in the left half of the window are created; with 0 all features are created.
When training on faces, their left-right symmetry allows symmetric = 1, which speeds up training. */
{
CvIntHaarFeatures* features = NULL; // structure that will hold all Haar features; the individual features live in the CvTHaarFeature array it points to
CvTHaarFeature haarFeature; // one Haar feature consists of 2 or 3 weighted rectangles;
// the rectangle weights have opposite signs, and the absolute value of a weight is inversely proportional to the rectangle's area
/* A memory storage is a low-level structure used to store dynamically growing data such as sequences, contours,
graphs and subdivisions. It is built from a linked list of equally sized memory blocks. (Note adapted from https://code.csdn.net/snippets/632568/master/.cpp/raw) */
CvMemStorage* storage = NULL;
CvSeq* seq = NULL;
CvSeqWriter writer;
int s0 = 36; /* minimum total area size of basic haar feature */
int s1 = 12; /* minimum total area size of tilted haar features 2 */
int s2 = 18; /* minimum total area size of tilted haar features 3 */
int s3 = 24; /* minimum total area size of tilted haar features 4 */
int x = 0;
int y = 0; // (x, y) is the top-left position of the elementary rectangle
int dx = 0; // dx is the width of the elementary rectangle
int dy = 0; // dy is the height of the elementary rectangle
///////////////////////////////////////////////////////////////////////////////
// compute the scaling factor
// winsize is the size of the training samples
#if 0
float factor = 1.0F;
factor = ((float) winsize.width) * winsize.height / (24 * 24);
s0 = (int) (s0 * factor);
s1 = (int) (s1 * factor);
s2 = (int) (s2 * factor);
s3 = (int) (s3 * factor);
#else
s0 = 1;
s1 = 1;
s2 = 1;
s3 = 1;
#endif
/* CV_VECTOR_CREATE( vec, CvIntHaarFeature, size, maxsize ) */
storage = cvCreateMemStorage();
// create a new sequence and initialise the sequence writer
cvStartWriteSeq( 0, sizeof( CvSeq ), sizeof( haarFeature ), storage, &writer );
for( x = 0; x < winsize.width; x++ )
{
for( y = 0; y < winsize.height; y++ )
{
for( dx = 1; dx <= winsize.width; dx++ )
{
for( dy = 1; dy <= winsize.height; dy++ )
{
// haar_x2
if ( (x+dx*2 <= winsize.width) && (y+dy <= winsize.height) ) {
if (dx*2*dy < s0) continue; // skip features whose total area is below the minimum s0
if (!symmetric || (x+x+dx*2 <=winsize.width)) { // with symmetric samples, keep only features centred in the left half of the window
haarFeature = cvHaarFeature( "haar_x2", // feature type
x, y, dx*2, dy, -1, // [x, y, dx*2, dy] is the top-left corner, width and height of the outer rectangle; -1 is its weight
x+dx, y, dx , dy, +2 ); // [x+dx, y, dx, dy] is the inner rectangle; +2 is its weight
// The outer rectangle's pixel sum is multiplied by -1 and the inner one's by +2, which amounts to two equally wide,
// horizontally adjacent rectangles: right sum minus left sum (black is +, white is -).
/* CV_VECTOR_PUSH( vec, CvIntHaarFeature, haarFeature, size, maxsize, step ) */
CV_WRITE_SEQ_ELEM( haarFeature, writer );
}
}
// haar_y2
if ( (x+dx <= winsize.width) && (y+dy*2 <= winsize.height) ) {
if (dx*2*dy < s0) continue;
if (!symmetric || (x+x+dx <= winsize.width)) {
haarFeature = cvHaarFeature( "haar_y2",
x, y, dx, dy*2, -1,
x, y+dy, dx, dy, +2 );
CV_WRITE_SEQ_ELEM( haarFeature, writer );
}
}
// haar_x3
if ( (x+dx*3 <= winsize.width) && (y+dy <= winsize.height) ) {
if (dx*3*dy < s0) continue;
if (!symmetric || (x+x+dx*3 <=winsize.width)) {
haarFeature = cvHaarFeature( "haar_x3",
x, y, dx*3, dy, -1,
x+dx, y, dx, dy, +3 );
CV_WRITE_SEQ_ELEM( haarFeature, writer );
}
}
// haar_y3
if ( (x+dx <= winsize.width) && (y+dy*3 <= winsize.height) ) {
if (dx*3*dy < s0) continue;
if (!symmetric || (x+x+dx <= winsize.width)) {
haarFeature = cvHaarFeature( "haar_y3",
x, y, dx, dy*3, -1,
x, y+dy, dx, dy, +3 );
CV_WRITE_SEQ_ELEM( haarFeature, writer );
}
}
if( mode != 0 /*BASIC*/ ) {
// haar_x4
if ( (x+dx*4 <= winsize.width) && (y+dy <= winsize.height) ) {
if (dx*4*dy < s0) continue;
if (!symmetric || (x+x+dx*4 <=winsize.width)) {
haarFeature = cvHaarFeature( "haar_x4",
x, y, dx*4, dy, -1,
x+dx, y, dx*2, dy, +2 );
CV_WRITE_SEQ_ELEM( haarFeature, writer );
}
}
// haar_y4
if ( (x+dx <= winsize.width ) && (y+dy*4 <= winsize.height) ) {
if (dx*4*dy < s0) continue;
if (!symmetric || (x+x+dx <=winsize.width)) {
haarFeature = cvHaarFeature( "haar_y4",
x, y, dx, dy*4, -1,
x, y+dy, dx, dy*2, +2 );
CV_WRITE_SEQ_ELEM( haarFeature, writer );
}
}
}
// x2_y2
if ( (x+dx*2 <= winsize.width) && (y+dy*2 <= winsize.height) ) {
if (dx*4*dy < s0) continue;
if (!symmetric || (x+x+dx*2 <=winsize.width)) {
haarFeature = cvHaarFeature( "haar_x2_y2",
x , y, dx*2, dy*2, -1,
x , y , dx , dy, +2,
x+dx, y+dy, dx , dy, +2 );
CV_WRITE_SEQ_ELEM( haarFeature, writer );
}
}
if (mode != 0 /*BASIC*/) {
// point
if ( (x+dx*3 <= winsize.width) && (y+dy*3 <= winsize.height) ) {
if (dx*9*dy < s0) continue;
if (!symmetric || (x+x+dx*3 <=winsize.width)) {
haarFeature = cvHaarFeature( "haar_point",//"haar_point"为3*3的小矩形
x , y, dx*3, dy*3, -1,
x+dx, y+dy, dx , dy , +9);
CV_WRITE_SEQ_ELEM( haarFeature, writer );
}
}
}
if (mode == 2 /*ALL*/) {
// tilted haar_x2 (x, y, w, h, b, weight)
if ( (x+2*dx <= winsize.width) && (y+2*dx+dy <= winsize.height) && (x-dy>= 0) ) {
if (dx*2*dy < s1) continue;
if (!symmetric || (x <= (winsize.width / 2) )) {
haarFeature = cvHaarFeature( "tilted_haar_x2",///????????
????
?
???
??
??????
?
?
??
??
x, y, dx*2, dy, -1,
x, y, dx , dy, +2 );
CV_WRITE_SEQ_ELEM( haarFeature, writer );
}
}
// tilted haar_y2 (x, y, w, h, b, weight)
if ( (x+dx <= winsize.width) && (y+dx+2*dy <= winsize.height) && (x-2*dy>= 0) ) {
if (dx*2*dy < s1) continue;
if (!symmetric || (x <= (winsize.width / 2) )) {
haarFeature = cvHaarFeature( "tilted_haar_y2",
x, y, dx, 2*dy, -1,
x, y, dx, dy, +2 );
CV_WRITE_SEQ_ELEM( haarFeature, writer );
}
}
// tilted haar_x3 (x, y, w, h, b, weight)
if ( (x+3*dx <= winsize.width) && (y+3*dx+dy <= winsize.height) && (x-dy>= 0) ) {
if (dx*3*dy < s2) continue;
if (!symmetric || (x <= (winsize.width / 2) )) {
haarFeature = cvHaarFeature( "tilted_haar_x3",//////?
??????????????????
?
?
??
??
x, y, dx*3, dy, -1,
x+dx, y+dx, dx , dy, +3 );
CV_WRITE_SEQ_ELEM( haarFeature, writer );
}
}
// tilted haar_y3 (x, y, w, h, b, weight)
if ( (x+dx <= winsize.width) && (y+dx+3*dy <= winsize.height) && (x-3*dy>= 0) ) {
if (dx*3*dy < s2) continue;
if (!symmetric || (x <= (winsize.width / 2) )) {
haarFeature = cvHaarFeature( "tilted_haar_y3",//?
?
?????????
???
?????????
?
x, y, dx, 3*dy, -1,
x-dy, y+dy, dx, dy, +3 );
CV_WRITE_SEQ_ELEM( haarFeature, writer );
}
}
// tilted haar_x4 (x, y, w, h, b, weight)
if ( (x+4*dx <= winsize.width) && (y+4*dx+dy <= winsize.height) && (x-dy>= 0) ) {
if (dx*4*dy < s3) continue;
if (!symmetric || (x <= (winsize.width / 2) )) {
haarFeature = cvHaarFeature( "tilted_haar_x4",//???????
??
???
???
???????
x, y, dx*4, dy, -1,
x+dx, y+dx, dx*2, dy, +2 );
CV_WRITE_SEQ_ELEM( haarFeature, writer );
}
}
// tilted haar_y4 (x, y, w, h, b, weight)
if ( (x+dx <= winsize.width) && (y+dx+4*dy <= winsize.height) && (x-4*dy>= 0) ) {
if (dx*4*dy < s3) continue;
if (!symmetric || (x <= (winsize.width / 2) )) {
haarFeature = cvHaarFeature( "tilted_haar_y4",//???????
???
?
??
???
x, y, dx, 4*dy, -1,
x-dy, y+dy, dx, 2*dy, +2 );
CV_WRITE_SEQ_ELEM( haarFeature, writer );
}
}
/*
// tilted point
if ( (x+dx*3 <= winsize.width - 1) && (y+dy*3 <= winsize.height - 1) && (x-3*dy>= 0)) {
if (dx*9*dy < 36) continue;
if (!symmetric || (x <= (winsize.width / 2) )) {
haarFeature = cvHaarFeature( "tilted_haar_point",
x, y, dx*3, dy*3, -1,
x, y+dy, dx , dy, +9 );
CV_WRITE_SEQ_ELEM( haarFeature, writer );
}
}
*/
}
}
}
}
}
seq = cvEndWriteSeq( &writer );//函数cvEndWriteSeq完毕写入操作并返回指向被写入元素的序列的地址,
//同一时候,函数将截取最后那个不完整的序列块.块的剩余部分返回到内存之后,序列即能够被安全地读和写.
features = (CvIntHaarFeatures*) cvAlloc( sizeof( CvIntHaarFeatures ) +
( sizeof( CvTHaarFeature ) + sizeof( CvFastHaarFeature ) ) * seq->total );
// In OpenCV all memory allocation and deallocation go through cvAlloc and cvFree;
// if the two calls are not paired, a memory leak can be assumed.
features->feature = (CvTHaarFeature*) (features + 1);
features->fastfeature = (CvFastHaarFeature*) ( features->feature + seq->total );
features->count = seq->total;
features->winsize = winsize;
// copy the whole sequence (or part of it) into one contiguous array
cvCvtSeqToArray( seq, (CvArr*) features->feature );
cvReleaseMemStorage( &storage );
// convert the plain features into "fast" Haar features that are evaluated directly on the integral image
icvConvertToFastHaarFeature( features->feature, features->fastfeature,
features->count, (winsize.width + 1) );
return features;
}
static
void icvReleaseIntHaarFeatures( CvIntHaarFeatures** intHaarFeatures )
{
if( intHaarFeatures != NULL && (*intHaarFeatures) != NULL )
{
cvFree( intHaarFeatures );
(*intHaarFeatures) = NULL;
}
}
/*
* Definition of icvConvertToFastHaarFeature
*/
void icvConvertToFastHaarFeature( CvTHaarFeature* haarFeature,
// input array
CvFastHaarFeature* fastHaarFeature,
// output array
int size, int step )
// size is the number of features in the arrays; step is the row step of the integral image (here winsize.width + 1 elements per row)
{
int i = 0; // index over the features
int j = 0; // index over the rectangles of one feature
for( i = 0; i < size; i++ )
{
fastHaarFeature[i].tilted = haarFeature[i].tilted;
if( !fastHaarFeature[i].tilted )// upright feature
{
for( j = 0; j < CV_HAAR_FEATURE_MAX; j++ )
{
fastHaarFeature[i].rect[j].weight = haarFeature[i].rect[j].weight;
// a zero weight means this rectangle does not exist (or all rectangles have been processed), so stop
if( fastHaarFeature[i].rect[j].weight == 0.0F )
{
break;
}
// for upright features the macro defined above turns the rectangle into four precomputed integral-image offsets
CV_SUM_OFFSETS( fastHaarFeature[i].rect[j].p0,
fastHaarFeature[i].rect[j].p1,
fastHaarFeature[i].rect[j].p2,
fastHaarFeature[i].rect[j].p3,
haarFeature[i].rect[j].r, step )
}
}
else // tilted feature
{
for( j = 0; j < CV_HAAR_FEATURE_MAX; j++ )
{
fastHaarFeature[i].rect[j].weight = haarFeature[i].rect[j].weight;
if( fastHaarFeature[i].rect[j].weight == 0.0F )
{
break;
}
// for tilted features the macro defined above precomputes the offsets into the tilted integral image
CV_TILTED_OFFSETS( fastHaarFeature[i].rect[j].p0,
fastHaarFeature[i].rect[j].p1,
fastHaarFeature[i].rect[j].p2,
fastHaarFeature[i].rect[j].p3,
haarFeature[i].rect[j].r, step )
}
}
}
}
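/*
 * Illustrative note: once converted, a CvFastHaarFeature is evaluated with
 * cvEvalFastHaarFeature() (declared in _cvhaartraining.h), which, roughly, accumulates
 *
 *   value = sum over rectangles j of
 *           rect[j].weight * ( img[p0] - img[p1] - img[p2] + img[p3] )
 *
 * where img is the row of data->sum (or data->tilted for tilted features) belonging to one
 * sample. The p0..p3 offsets precomputed above are what make this four-lookup evaluation
 * possible; the exact formula should be checked against _cvhaartraining.h.
 */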
/*
* icvCreateHaarTrainingData
*
* Create haar training data used in stage training
Purpose: allocate the memory needed to train a stage classifier and return its address.
Input:
winsize: sample image size
maxnumsamples: total number of samples (number of positives + number of negatives)
Output: CvHaarTrainigData* data, pointing to the first byte of the allocated block.
Pseudocode:
allocate memory for every member of data;
return data;  (see http://www.opencvchina.com/thread-191-1-1.html)
*/
static
CvHaarTrainigData* icvCreateHaarTrainingData( CvSize winsize, int maxnumsamples )
{
CvHaarTrainigData* data;
/* #define CV_FUNCNAME( Name ) \
static char cvFuncName[] = Name
CV_FUNCNAME defines the variable cvFuncName holding the function name, so the failing function can be reported when an error occurs.
*/
CV_FUNCNAME( "icvCreateHaarTrainingData" );
/*
__BEGIN__ and __END__ are used as a pair; on an error the code jumps to the exit label. In cxerror.h:
#define __BEGIN__ {
#define __END__ goto exit; exit: ; }
About the extra semicolon written after __BEGIN__ and __END__ below:
__BEGIN__; expands to "{;", where the ';' is simply an empty statement, so it is legal with or without it. The same holds for __END__.
*/
__BEGIN__;
data = NULL;
uchar* ptr = NULL;
size_t datasize = 0; // size_t ("size type") is an unsigned integer type used for sizes;
// since it holds an integer it supports ordinary arithmetic and can be converted to and assigned from int.
datasize = sizeof( CvHaarTrainigData ) +
/* sum and tilted */
( 2 * (winsize.width + 1) * (winsize.height + 1) * sizeof( sum_type ) +
sizeof( float ) + /* normfactor */
sizeof( float ) + /* cls */
sizeof( float ) /* weight */
) * maxnumsamples;
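/*
 * Worked example, assuming sum_type is a 32-bit integer as typedef'd in _cvhaartraining.h:
 * for a 24x24 window and maxnumsamples = 5000 this gives roughly
 *   2 * 25 * 25 * 4 bytes = 5000 bytes per sample for sum + tilted,
 *   3 * 4 bytes           =   12 bytes per sample for normfactor, cls and weight,
 * i.e. about 5012 * 5000 = 24 MB plus sizeof( CvHaarTrainigData ) for the whole block.
 */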
CV_CALL( data = (CvHaarTrainigData*) cvAlloc( datasize ) );
/*
* void *memset(void *s, int ch, size_t n);
* Fills the first n bytes of s with the value ch and returns s.
* memset fills a memory block with a given value; it is the fastest way to zero a large structure or array.
*/
memset( (void*)data, 0, datasize );
data->maxnum = maxnumsamples;
data->winsize = winsize;
ptr = (uchar*)(data + 1);
data->sum = cvMat( maxnumsamples, (winsize.width + 1) * (winsize.height + 1),
CV_SUM_MAT_TYPE, (void*) ptr );
ptr += sizeof( sum_type ) * maxnumsamples * (winsize.width+1) * (winsize.height+1);
data->tilted = cvMat( maxnumsamples, (winsize.width + 1) * (winsize.height + 1),
CV_SUM_MAT_TYPE, (void*) ptr );
ptr += sizeof( sum_type ) * maxnumsamples * (winsize.width+1) * (winsize.height+1);
data->normfactor = cvMat( 1, maxnumsamples, CV_32FC1, (void*) ptr );
ptr += sizeof( float ) * maxnumsamples;
data->cls = cvMat( 1, maxnumsamples, CV_32FC1, (void*) ptr );
ptr += sizeof( float ) * maxnumsamples;
data->weights = cvMat( 1, maxnumsamples, CV_32FC1, (void*) ptr );
data->valcache = NULL;
data->idxcache = NULL;
__END__;
return data;
}
/*
* icvReleaseHaarTrainingDataCache
* Releases the training-data caches: the cache of precomputed sample feature values and the cache of sorted sample indices.
*/
static
void icvReleaseHaarTrainingDataCache( CvHaarTrainigData** haarTrainingData )
{
if( haarTrainingData != NULL && (*haarTrainingData) != NULL )
{
// CvMat* valcache: the samples' feature values; maxnum rows and CvIntHaarFeatures.count columns
if( (*haarTrainingData)->valcache != NULL )
{
cvReleaseMat( &(*haarTrainingData)->valcache );
(*haarTrainingData)->valcache = NULL;
}
// CvMat* idxcache: sample indices sorted by ascending feature value; CvIntHaarFeatures.count rows and maxnum columns
if( (*haarTrainingData)->idxcache != NULL )
{
cvReleaseMat( &(*haarTrainingData)->idxcache );
(*haarTrainingData)->idxcache = NULL;
}
}
}
/*
* icvReleaseHaarTrainingData
* Releases the training sample data (including the caches above).
*/
static
void icvReleaseHaarTrainingData( CvHaarTrainigData** haarTrainingData )
{
if( haarTrainingData != NULL && (*haarTrainingData) != NULL )
{
icvReleaseHaarTrainingDataCache( haarTrainingData );
cvFree( haarTrainingData );
}
}
/*
* icvGetTrainingDataCallback
* Purpose: for every sample, evaluate the num features whose indices start at first and store the values in mat.
* Input:
* CvMat* mat  - matrix with one row per sample and num columns (or transposed when CV_COL_ARRANGEMENT is defined).
* first       - index of the first feature to evaluate.
* num         - number of features to evaluate.
* userdata    - integral images, weights, feature templates and related information.
* Output:
* CvMat* mat  - filled with the num feature values of every sample.
*/
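/*
 * Illustrative example: with CV_COL_ARRANGEMENT defined, mat is laid out as num rows
 * (features) by num_samples columns, so the value of feature (first + j) on sample i
 * lands in mat(j, i); without it, mat is num_samples x num and the same value lands in
 * mat(i, j). This mirrors the two branches of the #ifdef blocks inside the function body.
 */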
static
void icvGetTrainingDataCallback( CvMat* mat, CvMat* sampleIdx, CvMat*,
int first, int num, void* userdata )
{
int i = 0;
int j = 0;
float val = 0.0F;
float normfactor = 0.0F;
CvHaarTrainingData* training_data;
CvIntHaarFeatures* haar_features;
#ifdef CV_COL_ARRANGEMENT
assert( mat->rows >= num );
#else
assert( mat->cols >= num );
#endif
//userdata = cvUserdata( data, haarFeatures )
// userdata carries the integral images and features used for training; the pointer is the user parameter passed to this callback
training_data = ((CvUserdata*) userdata)->trainingData;
haar_features = ((CvUserdata*) userdata)->haarFeatures;
if( sampleIdx == NULL )
{
int num_samples;
#ifdef CV_COL_ARRANGEMENT
num_samples = mat->cols;
#else
num_samples = mat->rows;
#endif
for( i = 0; i < num_samples; i++ )//样本数量
{
for( j = 0; j < num; j++ )//每一个样本的第j个特征
{ //计算一个样本(积分图为sum和tilted)的一个HaarFeature。并返回该值
val = cvEvalFastHaarFeature(
( haar_features->fastfeature
+ first + j ),
(sum_type*) (training_data->sum.data.ptr
+ i * training_data->sum.step),
(sum_type*) (training_data->tilted.data.ptr
+ i * training_data->tilted.step) );
normfactor = training_data->normfactor.data.fl[i];
val = ( normfactor == 0.0F ) ? 0.0F : (val / normfactor);
#ifdef CV_COL_ARRANGEMENT
CV_MAT_ELEM( *mat, float, j, i ) = val;
#else
CV_MAT_ELEM( *mat, float, i, j ) = val;
#endif
}
}
}
else
{
uchar* idxdata = NULL;
size_t step = 0;
int numidx = 0;
int idx = 0;
assert( CV_MAT_TYPE( sampleIdx->type ) == CV_32FC1 );
idxdata = sampleIdx->data.ptr;
if( sampleIdx->rows == 1 )
{
step = sizeof( float );
numidx = sampleIdx->cols;
}
else
{
step = sampleIdx->step;
numidx = sampleIdx->rows;
}
for( i = 0; i < numidx; i++ )
{
for( j = 0; j < num; j++ )
{
idx = (int)( *((float*) (idxdata + i * step)) );
val = cvEvalFastHaarFeature(
( haar_features->fastfeature
+ first + j ),
(sum_type*) (training_data->sum.data.ptr
+ idx * training_data->sum.step),
(sum_type*) (training_data->tilted.data.ptr
+ idx * training_data->tilted.step) );
normfactor = training_data->normfactor.data.fl[idx];
val = ( normfactor == 0.0F ) ? 0.0F : (val / normfactor);
#ifdef CV_COL_ARRANGEMENT
CV_MAT_ELEM( *mat, float, j, idx ) = val;
#else
CV_MAT_ELEM( *mat, float, idx, j ) = val;
#endif
}
}
}
#if 0 /*def CV_VERBOSE*/
if( first % 5000 == 0 )
{
fprintf( stderr, "%3d%%
", (int) (100.0 * first /
haar_features->count) );
fflush( stderr );
}
#endif /* CV_VERBOSE */
}
static
void icvPrecalculate( CvHaarTrainingData* data, CvIntHaarFeatures* haarFeatures,
int numprecalculated )
{
CV_FUNCNAME( "icvPrecalculate" );
__BEGIN__;
icvReleaseHaarTrainingDataCache( &data );
numprecalculated -= numprecalculated % CV_STUMP_TRAIN_PORTION;
numprecalculated = MIN( numprecalculated, haarFeatures->count );
if( numprecalculated > 0 )
{
//size_t datasize;
int m;
CvUserdata userdata;
/* private variables */
#ifdef CV_OPENMP
CvMat t_data;
CvMat t_idx;
int first;
int t_portion;
int portion = CV_STUMP_TRAIN_PORTION;
#endif /* CV_OPENMP */
m = data->sum.rows;
#ifdef CV_COL_ARRANGEMENT
CV_CALL( data->valcache = cvCreateMat( numprecalculated, m, CV_32FC1 ) );
#else
CV_CALL( data->valcache = cvCreateMat( m, numprecalculated, CV_32FC1 ) );
#endif
CV_CALL( data->idxcache = cvCreateMat( numprecalculated, m, CV_IDX_MAT_TYPE ) );
userdata = cvUserdata( data, haarFeatures );
#ifdef CV_OPENMP
#pragma omp parallel for private(t_data, t_idx, first, t_portion)
for( first = 0; first < numprecalculated; first += portion )
{
t_data = *data->valcache;
t_idx = *data->idxcache;
t_portion = MIN( portion, (numprecalculated - first) );
/* indices */
t_idx.rows = t_portion;
t_idx.data.ptr = data->idxcache->data.ptr + first * ((size_t)t_idx.step);
/* feature values */
#ifdef CV_COL_ARRANGEMENT
t_data.rows = t_portion;
t_data.data.ptr = data->valcache->data.ptr +
first * ((size_t) t_data.step );
#else
t_data.cols = t_portion;
t_data.data.ptr = data->valcache->data.ptr +
first * ((size_t) CV_ELEM_SIZE( t_data.type ));
#endif
icvGetTrainingDataCallback( &t_data, NULL, NULL, first, t_portion,
&userdata );
#ifdef CV_COL_ARRANGEMENT
cvGetSortedIndices( &t_data, &t_idx, 0 );
#else
cvGetSortedIndices( &t_data, &t_idx, 1 );
#endif
#ifdef CV_VERBOSE
putc( '.', stderr );
fflush( stderr );
#endif /* CV_VERBOSE */
}
#ifdef CV_VERBOSE
fprintf( stderr, "
" );
fflush( stderr );
#endif /* CV_VERBOSE */
#else
icvGetTrainingDataCallback( data->valcache, NULL, NULL, 0, numprecalculated,
&userdata );
#ifdef CV_COL_ARRANGEMENT
cvGetSortedIndices( data->valcache, data->idxcache, 0 );
#else
cvGetSortedIndices( data->valcache, data->idxcache, 1 );
#endif
#endif /* CV_OPENMP */
}
__END__;
}
static
void icvSplitIndicesCallback( int compidx, float threshold,
CvMat* idx, CvMat** left, CvMat** right,
void* userdata )
{
CvHaarTrainingData* data;
CvIntHaarFeatures* haar_features;
int i;
int m;
CvFastHaarFeature* fastfeature;
data = ((CvUserdata*) userdata)->trainingData;
haar_features = ((CvUserdata*) userdata)->haarFeatures;
fastfeature = &haar_features->fastfeature[compidx];
m = data->sum.rows;
*left = cvCreateMat( 1, m, CV_32FC1 );
*right = cvCreateMat( 1, m, CV_32FC1 );
(*left)->cols = (*right)->cols = 0;
if( idx == NULL )
{
for( i = 0; i < m; i++ )
{
if( cvEvalFastHaarFeature( fastfeature,
(sum_type*) (data->sum.data.ptr + i * data->sum.step),
(sum_type*) (data->tilted.data.ptr + i * data->tilted.step) )
< threshold * data->normfactor.data.fl[i] )
{
(*left)->data.fl[(*left)->cols++] = (float) i;
}
else
{
(*right)->data.fl[(*right)->cols++] = (float) i;
}
}
}
else
{
uchar* idxdata;
int idxnum;
size_t idxstep;
int index;
idxdata = idx->data.ptr;
idxnum = (idx->rows == 1) ? idx->cols : idx->rows;
idxstep = (idx->rows == 1) ?
CV_ELEM_SIZE( idx->type ) : idx->step;
for( i = 0; i < idxnum; i++ )
{
index = (int) *((float*) (idxdata + i * idxstep));
if( cvEvalFastHaarFeature( fastfeature,
(sum_type*) (data->sum.data.ptr + index * data->sum.step),
(sum_type*) (data->tilted.data.ptr + index * data->tilted.step) )
< threshold * data->normfactor.data.fl[index] )
{
(*left)->data.fl[(*left)->cols++] = (float) index;
}
else
{
(*right)->data.fl[(*right)->cols++] = (float) index;
}
}
}
}
/*
* icvCreateCARTStageClassifier
*
* Create stage classifier with trees as weak classifiers
* data - haar training data. It must be created and filled before call
* minhitrate - desired min hit rate
* maxfalsealarm - desired max false alarm rate
* symmetric - if not 0 it is assumed that samples are vertically symmetric
* numprecalculated - number of features that will be precalculated. Each precalculated
* feature need (number_of_samples*(sizeof( float ) + sizeof( short ))) bytes of memory
* weightfraction - weight trimming parameter
* numsplits - number of binary splits in each tree
* boosttype - type of applied boosting algorithm
* stumperror - type of used error if Discrete AdaBoost algorithm is applied
* maxsplits - maximum total number of splits in all weak classifiers.
* If it is not 0 then NULL returned if total number of splits exceeds <maxsplits>.
*/
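/*
 * Illustrative note, for orientation only: the values passed in here come from the caller,
 * but the opencv_haartraining command-line tool typically uses minhitrate = 0.995,
 * maxfalsealarm = 0.5, weightfraction = 0.95 and numsplits = 1 (single-split stumps)
 * unless overridden; these figures are an assumption for illustration, not part of this file.
 */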
static
// Parts of the annotations for icvCreateCARTStageClassifier are adapted from http://www.sjsjw.com/kf_www/article/000119ABA007840.asp
CvIntHaarClassifier* icvCreateCARTStageClassifier( CvHaarTrainingData* data, // all training samples
CvMat* sampleIdx, // indices of the samples actually used for training
CvIntHaarFeatures* haarFeatures, // all Haar features
float minhitrate, // minimum hit rate (used to pick the stage threshold)
float maxfalsealarm, // maximum false alarm rate (used as the convergence test)
int symmetric, // whether the samples (and hence the Haar features) are vertically symmetric
float weightfraction, // weight trimming fraction (samples with small weights are dropped)
int numsplits, // number of splits (features) per weak classifier, usually 1
CvBoostType boosttype, // type of boosting algorithm
CvStumpError stumperror, // error type used to compute thresholds in Discrete AdaBoost
int maxsplits ) // maximum total number of weak-classifier splits
{
#ifdef CV_COL_ARRANGEMENT
int flags = CV_COL_SAMPLE;
#else
int flags = CV_ROW_SAMPLE;
#endif
CvStageHaarClassifier* stage = NULL; // stage (strong) classifier
CvBoostTrainer* trainer; // boosting trainer, used to update the sample weights
CvCARTClassifier* cart = NULL; // weak classifier (CART tree)
CvCARTTrainParams trainParams; // CART training parameters
CvMTStumpTrainParams stumpTrainParams; // stump (weak classifier) training parameters
//CvMat* trainData = NULL;
//CvMat* sortedIdx = NULL;
CvMat eval; // temporary matrix
int n = 0; // total number of features
int m = 0; // total number of samples
int numpos = 0; // number of positive samples
int numneg = 0; // number of negative samples
int numfalse = 0; // number of misclassified negatives (false alarms)
float sum_stage = 0.0F; // accumulated stage confidence
float threshold = 0.0F; // stage classifier threshold
float falsealarm = 0.0F; // false alarm rate
//CvMat* sampleIdx = NULL;
CvMat* trimmedIdx; // sample indices remaining after weight trimming
//float* idxdata = NULL;
//float* tempweights = NULL;
//int idxcount = 0;
CvUserdata userdata; // training data passed to the callbacks
int i = 0;
int j = 0;
int idx;
int numsamples; // number of samples actually used
int numtrimmed; // number of samples remaining after weight trimming
CvCARTHaarClassifier* classifier; // weak classifier
CvSeq* seq = NULL;
CvMemStorage* storage = NULL;
CvMat* weakTrainVals; // sample responses; only used by LogitBoost
float alpha;
float sumalpha;
int num_splits; // total number of weak-classifier splits
#ifdef CV_VERBOSE
printf( "+----+----+-+---------+---------+---------+---------+
" );
printf( "| N |%%SMP|F| ST.THR | HR | FA | EXP. ERR|
" );
printf( "+----+----+-+---------+---------+---------+---------+
" );
#endif /* CV_VERBOSE */
n = haarFeatures->count;//这是haar特征的数目,20*20的数目是78460,24*24的是162336,30*30的是394725.
m = data->sum.rows;
numsamples = (sampleIdx) ? MAX( sampleIdx->rows, sampleIdx->cols ) : m;
userdata = cvUserdata( data, haarFeatures );
/* weak classifier parameter setup */
stumpTrainParams.type = ( boosttype == CV_DABCLASS )
? CV_CLASSIFICATION_CLASS : CV_REGRESSION; // classification or regression
stumpTrainParams.error = ( boosttype == CV_LBCLASS || boosttype == CV_GABCLASS )
? CV_SQUARE : stumperror;
stumpTrainParams.portion = CV_STUMP_TRAIN_PORTION; // number of features processed per batch
stumpTrainParams.getTrainData = icvGetTrainingDataCallback; // callback that computes the samples' Haar feature values
stumpTrainParams.numcomp = n; // number of features
stumpTrainParams.userdata = &userdata;
stumpTrainParams.sortedIdx = data->idxcache; // feature-by-sample index matrix (sorted)
trainParams.count = numsplits;
trainParams.stumpTrainParams = (CvClassifierTrainParams*) &stumpTrainParams;
trainParams.stumpConstructor = cvCreateMTStumpClassifier; // selects the best weak classifier (stump)
trainParams.splitIdx = icvSplitIndicesCallback; // reportedly not used here
trainParams.userdata = &userdata; // end of the CART weak classifier parameter setup
// temporary vector holding the samples' Haar feature values (single-channel 32-bit float)
eval = cvMat( 1, m, CV_32FC1, cvAlloc( sizeof( float ) * m ) );
storage = cvCreateMemStorage();
// sequence that stores the selected weak classifiers
seq = cvCreateSeq( 0, sizeof( *seq ), sizeof( classifier ), storage );
// sample responses; only used by LogitBoost
weakTrainVals = cvCreateMat( 1, m, CV_32FC1 );
// initialise the sample responses and weights; weakTrainVals becomes {-1, +1} and all weights start equal
trainer = cvBoostStartTraining( &data->cls, weakTrainVals, &data->weights,
sampleIdx, boosttype );
num_splits = 0;
sumalpha = 0.0F;
do
{
#ifdef CV_VERBOSE
int v_wt = 0;
int v_flipped = 0;
#endif /* CV_VERBOSE */
trimmedIdx = cvTrimWeights( &data->weights, sampleIdx, weightfraction );
numtrimmed = (trimmedIdx) ?
MAX( trimmedIdx->rows, trimmedIdx->cols ) : m;
#ifdef CV_VERBOSE
v_wt = 100 * numtrimmed / numsamples;
v_flipped = 0;
#endif /* CV_VERBOSE */
// key step: builds the CART tree and, in doing so, finds the current best weak classifier (usually just a root node)
cart = (CvCARTClassifier*) cvCreateCARTClassifier( data->valcache,
flags,
weakTrainVals, 0, 0, 0, trimmedIdx,
&(data->weights),
(CvClassifierTrainParams*) &trainParams );
// create the weak classifier
classifier = (CvCARTHaarClassifier*) icvCreateCARTHaarClassifier( numsplits );
// convert the CART tree into a Haar weak classifier
icvInitCARTHaarClassifier( classifier, cart, haarFeatures );
num_splits += classifier->count;
cart->release( (CvClassifier**) &cart );
// why does this only have to be done when the number of weak classifiers is odd?
if( symmetric && (seq->total % 2) )
{
float normfactor = 0.0F;
CvStumpClassifier* stump;
/* mirror the Haar features horizontally */
for( i = 0; i < classifier->count; i++ )
{
if( classifier->feature[i].desc[0] == 'h' )
{
for( j = 0; j < CV_HAAR_FEATURE_MAX &&
classifier->feature[i].rect[j].weight != 0.0F; j++ )
{
classifier->feature[i].rect[j].r.x = data->winsize.width -
classifier->feature[i].rect[j].r.x -
classifier->feature[i].rect[j].r.width;
}
}
else
{
int tmp = 0;
/* (x,y) -> (24-x,y) */
/* w -> h; h -> w */
for( j = 0; j < CV_HAAR_FEATURE_MAX &&
classifier->feature[i].rect[j].weight != 0.0F; j++ )
{
classifier->feature[i].rect[j].r.x = data->winsize.width -
classifier->feature[i].rect[j].r.x;
CV_SWAP( classifier->feature[i].rect[j].r.width,
classifier->feature[i].rect[j].r.height, tmp );
}
}
}
// convert to features evaluated on the integral image
icvConvertToFastHaarFeature( classifier->feature,
classifier->fastfeature,
classifier->count, data->winsize.width + 1 );
// prepare to test whether the mirrored features are the better choice
stumpTrainParams.getTrainData = NULL;
stumpTrainParams.numcomp = 1;
stumpTrainParams.userdata = NULL;
stumpTrainParams.sortedIdx = NULL;
// check whether the newly generated (mirrored) features can serve as the best weak classifier
for( i = 0; i < classifier->count; i++ )
{
for( j = 0; j < numtrimmed; j++ )
{
// get the index of the training sample
idx = icvGetIdxAt( trimmedIdx, j );
// evaluate the Haar feature on this training sample
eval.data.fl[idx] = cvEvalFastHaarFeature( &classifier->fastfeature[i],
(sum_type*) (data->sum.data.ptr + idx * data->sum.step),
(sum_type*) (data->tilted.data.ptr + idx * data->tilted.step) );
// normalisation factor
normfactor = data->normfactor.data.fl[idx];
// normalise the Haar feature value
eval.data.fl[idx] = ( normfactor == 0.0F )
? 0.0F : (eval.data.fl[idx] / normfactor);
}
// find the best weak classifier (stump) on the mirrored feature values
stump = (CvStumpClassifier*) trainParams.stumpConstructor( &eval,
CV_COL_SAMPLE,
weakTrainVals, 0, 0, 0, trimmedIdx,
&(data->weights),
trainParams.stumpTrainParams );
classifier->threshold[i] = stump->threshold; // 阈值
if( classifier->left[i] <= 0 )
{
classifier->val[-classifier->left[i]] = stump->left; // 左分支输出置信度
}
if( classifier->right[i] <= 0 )
{
classifier->val[-classifier->right[i]] = stump->right; // 右分支输出置信度
}
stump->release( (CvClassifier**) &stump );
}
// restore the parameters so they again support cvCreateCARTClassifier
stumpTrainParams.getTrainData = icvGetTrainingDataCallback;
stumpTrainParams.numcomp = n;
stumpTrainParams.userdata = &userdata;
stumpTrainParams.sortedIdx = data->idxcache;
#ifdef CV_VERBOSE
v_flipped = 1;
#endif /* CV_VERBOSE */
} /* if symmetric */
if( trimmedIdx != sampleIdx )
{
cvReleaseMat( &trimmedIdx );
trimmedIdx = NULL;
}
// evaluate the current best weak classifier on every sample
for( i = 0; i < numsamples; i++ )
{
idx = icvGetIdxAt( sampleIdx, i );
eval.data.fl[idx] = classifier->eval( (CvIntHaarClassifier*) classifier,
(sum_type*) (data->sum.data.ptr + idx * data->sum.step),
(sum_type*) (data->tilted.data.ptr + idx * data->tilted.step),
data->normfactor.data.fl[idx] );
}
// update the sample weights; for LogitBoost weakTrainVals is updated as well
alpha = cvBoostNextWeakClassifier( &eval, &data->cls, weakTrainVals,
&data->weights, trainer );
// this variable is not really used for anything
sumalpha += alpha;
for( i = 0; i <= classifier->count; i++ )
{
if( boosttype == CV_RABCLASS )
{
classifier->val[i] = cvLogRatio( classifier->val[i] );
}
classifier->val[i] *= alpha;
}
// append the weak classifier
cvSeqPush( seq, (void*) &classifier );
// number of positive samples
numpos = 0;
// iterate over all samples in sampleIdx
for( i = 0; i < numsamples; i++ )
{
// get the sample index
idx = icvGetIdxAt( sampleIdx, i );
// if the sample is a positive
if( data->cls.data.fl[idx] == 1.0F )
{
// reset the accumulated stage value
eval.data.fl[numpos] = 0.0F;
// iterate over all weak classifiers in seq
for( j = 0; j < seq->total; j++ )
{
// get the weak classifier
classifier = *((CvCARTHaarClassifier**) cvGetSeqElem( seq, j ));
// accumulate the weak-classifier outputs for this positive sample
eval.data.fl[numpos] += classifier->eval(
(CvIntHaarClassifier*) classifier,
(sum_type*) (data->sum.data.ptr + idx * data->sum.step),
(sum_type*) (data->tilted.data.ptr + idx * data->tilted.step),
data->normfactor.data.fl[idx] );
}
/* eval.data.fl[numpos] = 2.0F * eval.data.fl[numpos] - seq->total; */
numpos++;
}
}
// sort the stage outputs of the positive samples
icvSort_32f( eval.data.fl, numpos, 0 );
// compute the stage threshold: samples scoring above it are classified as positive, below it as negative
threshold = eval.data.fl[(int) ((1.0F - minhitrate) * numpos)];
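/*
 * Worked example: the positive-sample stage outputs in eval are sorted in ascending order,
 * so with minhitrate = 0.995 and numpos = 2000 the index is (int)((1.0 - 0.995) * 2000) = 10
 * and the threshold becomes the 11th smallest output; the remaining ~99.5% of the positives
 * score at or above it and are accepted by the stage.
 */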
numneg = 0;
numfalse = 0;
// iterate over all samples
for( i = 0; i < numsamples; i++ )
{
idx = icvGetIdxAt( sampleIdx, i );
// if the sample is a negative
if( data->cls.data.fl[idx] == 0.0F )
{
numneg++;
sum_stage = 0.0F;
// iterate over all weak classifiers in seq
for( j = 0; j < seq->total; j++ )
{
classifier = *((CvCARTHaarClassifier**) cvGetSeqElem( seq, j ));
// accumulate the weak-classifier outputs for this negative sample
sum_stage += classifier->eval( (CvIntHaarClassifier*) classifier,
(sum_type*) (data->sum.data.ptr + idx * data->sum.step),
(sum_type*) (data->tilted.data.ptr + idx * data->tilted.step),
data->normfactor.data.fl[idx] );
}
/* sum_stage = 2.0F * sum_stage - seq->total; */
// scores below the threshold mean "negative", so the branch below counts the misclassified negatives
if( sum_stage >= (threshold - CV_THRESHOLD_EPS) )
{
numfalse++;
}
}
}
// compute the false alarm rate
falsealarm = ((float) numfalse) / ((float) numneg);
// verbose output
#ifdef CV_VERBOSE
{
// hit rate on the positive samples
float v_hitrate = 0.0F;
// false alarm rate on the negative samples
float v_falsealarm = 0.0F;
/* expected error of the stage classifier regardless of the threshold */
// what exactly is this?
float v_experr = 0.0F;
// iterate over all samples
for( i = 0; i < numsamples; i++ )
{
idx = icvGetIdxAt( sampleIdx, i );
sum_stage = 0.0F;
// iterate over all weak classifiers in seq
for( j = 0; j < seq->total; j++ )
{
classifier = *((CvCARTHaarClassifier**) cvGetSeqElem( seq, j ));
sum_stage += classifier->eval( (CvIntHaarClassifier*) classifier,
(sum_type*) (data->sum.data.ptr + idx * data->sum.step),
(sum_type*) (data->tilted.data.ptr + idx * data->tilted.step),
data->normfactor.data.fl[idx] );
}
/* sum_stage = 2.0F * sum_stage - seq->total; */
// a single threshold comparison is enough here
if( sum_stage >= (threshold - CV_THRESHOLD_EPS) )
{
if( data->cls.data.fl[idx] == 1.0F )
{
v_hitrate += 1.0F;
}
else
{
v_falsealarm += 1.0F;
}
}
// for a positive sample sum_stage must be >= 0 (otherwise it counts towards the expected error)
if( ( sum_stage >= 0.0F ) != (data->cls.data.fl[idx] == 1.0F) )
{
v_experr += 1.0F;
}
}
v_experr /= numsamples;
printf( "|%4d|%3d%%|%c|%9f|%9f|%9f|%9f|
",
seq->total, v_wt, ( (v_flipped) ?
'+' : '-' ),
threshold, v_hitrate / numpos, v_falsealarm / numneg,
v_experr );
printf( "+----+----+-+---------+---------+---------+---------+
" );
fflush( stdout );
}
#endif /* CV_VERBOSE */
// two stopping conditions: the false alarm rate has dropped below the target, or the total number of weak-classifier splits has reached maxsplits
} while( falsealarm > maxfalsealarm && (!maxsplits || (num_splits < maxsplits) ) );
cvBoostEndTraining( &trainer );
if( falsealarm > maxfalsealarm )
{
stage = NULL;
}
else
{
stage = (CvStageHaarClassifier*) icvCreateStageHaarClassifier( seq->total,
threshold );
cvCvtSeqToArray( seq, (CvArr*) stage->classifier );
}
/* CLEANUP */
cvReleaseMemStorage( &storage );
cvReleaseMat( &weakTrainVals );
cvFree( &(eval.data.ptr) );
return (CvIntHaarClassifier*) stage;
}
static
CvBackgroundData* icvCreateBackgroundData( const char* filename, CvSize winsize )
{
CvBackgroundData* data = NULL;
const char* dir = NULL;
char full[PATH_MAX];
char* imgfilename = NULL;
size_t datasize = 0;
int count = 0;
FILE* input = NULL;
char* tmp = NULL;
int len = 0;
assert( filename != NULL );
dir = strrchr( filename, '\\' );
if( dir == NULL )
{
dir = strrchr( filename, '/' );
}
if( dir == NULL )
{
imgfilename = &(full[0]);
}
else
{
strncpy( &(full[0]), filename, (dir - filename + 1) );
imgfilename = &(full[(dir - filename + 1)]);
}
input = fopen( filename, "r" );
if( input != NULL )
{
count = 0;
datasize = 0;
/* count */
while( !feof( input ) )
{
*imgfilename = '\0';