X-Boost  2.3.8
DiscreteAdaBoost.h
1 /* XBoost: Ada-Boost and Friends on Haar/ICF/HOG Features, Library and ToolBox
2  *
3  * Copyright (c) 2008-2014 Paolo Medici <medici@ce.unipr.it>
4  *
5  * This library is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU Lesser General Public
7  * License as published by the Free Software Foundation; either
8  * version 2 of the License, or (at your option) any later version.
9  *
10  * This library is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13  * Lesser General Public License for more details.
14  *
15  * You should have received a copy of the GNU Lesser General Public
16  * License along with this library; if not, write to the
17  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
18  * Boston, MA 02111-1307, USA.
19  */
20 
21 
22 #ifndef _DISCRETE_ADABOOST_H
23 #define _DISCRETE_ADABOOST_H
24 
25 #include "AdaBoost.h"
27 
28 #undef _epsilon
29 #define _epsilon (1E-10)
32 
33 // TODO:
34 #include "../DataSet.h"
35 
36 
40 public:
44  template<class DataSetType>
45  static void Update(DataSetType & set, double alpha)
46  {
47  double bw = exp( alpha ); // weight associated to error (>1)
48  double gw = 1.0/bw; // weight associated to correct detection (<1)
49  double z = 0.0;
50  double dmin,dmax;
51 // ricalcolo z per evitare errori di arrotondamento (capitano anche per i double)
52  for(unsigned int i =0; i<set.Size(); i++)
53  {
54  set.templates[i].d *= (set.templates[i].correct() ? gw : bw);
55  z+= set.templates[i].d;
56  }
57  std::cout << "z:" << z <<" bw:" << bw << " gw:" << gw << std::endl;
58  // normalize (TODO: capire se serva)
59  z = 1.0/z;
60  for(unsigned int i =0; i<set.Size(); i++)
61  {
62  set.templates[i].d *= z;
63  }
64 
65 // calc some stats
66  dmin = dmax = set.templates[0].d;
67  // davg = set.templates[0].d; // average e' sempre uguale in adaboost
68  for(unsigned int i =1; i<set.Size(); i++)
69  {
70  // davg += set.templates[i].d;
71  if(set.templates[i].d>dmax)
72  dmax = set.templates[i].d;
73  if(set.templates[i].d<dmin)
74  dmin = set.templates[i].d;
75  }
76  std::cout << "Distribution weights range from " << dmin << " to " << dmax << "." << std::endl;
77 
78  }
79 
80 
83  template<class DataSetType>
84  static double CalculateError(const DataSetType & set)
85  {
86  double e = 0.0;
87  for(unsigned int i =0; i<set.Size(); i++)
88  if( !set.templates[i].correct() )
89  e += set.templates[i].d;
90  return e;
91  }
92 
93 
96  template<class DataSetType>
97  static double Z(const DataSetType & set, float alpha)
98  {
99  double Z = 0.0;
100  double gw = exp( -alpha); // correct detection
101  double bw = exp( alpha); // error
102  for(unsigned int i =0; i<set.Size(); i++)
103  Z += set.templates[i].d * ( set.templates[i].correct() ? gw : bw );
104  return Z;
105  }
106 
107 
109  template<class DataSetType>
110  static void UpdateWeights(DataSetType & train, double alpha)
111  {
112  double bw = exp( alpha ); // weight associated to error (>1)
113  double gw = 1.0/bw; // weight associated to correct detection (<1)
114  double z = 0.0;
115  double dmin,dmax;
116 // ricalcolo z per evitare errori di arrotondamento (capitano anche per i double)
117  for(unsigned int i =0; i<train.Size(); i++)
118  {
119  train.templates[i].d *= (train.templates[i].correct() ? gw : bw);
120  z+= train.templates[i].d;
121  }
122  std::cout << "z:" << z <<" bw:" << bw << " gw:" << gw << std::endl;
123 // normalize
124  z = 1.0/z;
125  for(unsigned int i =0; i<train.Size(); i++)
126  {
127  train.templates[i].d *= z;
128  }
129 
130 // calc some stats
131  dmin = dmax = train.templates[0].d;
132  for(unsigned int i =1; i<train.Size(); i++)
133  {
134  if(train.templates[i].d>dmax)
135  dmax = train.templates[i].d;
136  if(train.templates[i].d<dmin)
137  dmin = train.templates[i].d;
138  }
139  std::cout << "Distribution weights range from " << dmin << " to " << dmax << std::endl;
140  }
141 
142 
144  template<class Classifier, class DataSetType>
145  static double ComputeW(const Classifier & c, DataSetType & train)
146  {
147  double wp, wn;
148  Evaluate(train, c);
149  return wp;
150  }
151 
152 };
153 
154 
/// Discrete AdaBoost specialization for voting boostable classifiers.
/// Provides hypothesis evaluation (filling wp/wn, alpha and Z) and the
/// per-round Update step that accepts a weak classifier and reweights the
/// training and validation distributions.
template<class WeakClassifier>
class AdaBoost< BoostableClassifier<WeakClassifier> >: public DiscreteAdaBoostCommon {
public:

public:

 /// Default constructor: nothing to initialize.
 AdaBoost() { }

 /// Run classifier @a h on every sample and accumulate the distribution
 /// weight of correct (@a wp) and wrong (@a wn) answers.
 /// Side effect: overwrites templates[i].test with the classifier response.
 /// @param[in,out] set dataset; the per-sample 'test' field is rewritten
 /// @param[in]     h   classifier exposing classify(data1, data2)
 /// @param[out]    wp  total weight of correctly classified samples
 /// @param[out]    wn  total weight of misclassified samples
 template<class Classifier, class DataSetType>
 static void Evaluate(DataSetType & set, const Classifier & h, double & wp, double & wn)
 {
 wp = wn = 0.0;
 for(unsigned int i =0; i<set.Size(); i++)
 {
 set.templates[i].test = h.classify(getData1(set.templates[i], set), getData2(set.templates[i], set));
 if( set.templates[i].correct() )
 wp += set.templates[i].d;
 else
 wn += set.templates[i].d;
 }
 }


 /// Evaluate classifier @a h and derive the Discrete AdaBoost statistics:
 /// Z = 2*sqrt(wn*wp) and alpha = 0.5*log((wp+eps)/(wn+eps)), where eps
 /// (_epsilon) protects against wn == 0 or wp == 0.
 /// Side effect: overwrites templates[i].test (via the other Evaluate).
 /// @param[in,out] set   dataset to evaluate
 /// @param[in]     h     classifier under test
 /// @param[out]    alpha optimal weight for this classifier
 /// @param[out]    Z     normalization factor (lower is better)
 /// @param[out]    wp    weight of correctly classified samples
 template<class R, class DataSetType>
 static void Evaluate(DataSetType & set, const R & h, double & alpha, double & Z, double & wp)
 {
 double wn;
 Evaluate(set, h, wp, wn);
 //Z Fn
 Z = 2.0 * sqrt( wn * wp);
 // return alpha
 alpha = 0.5 * log ( (wp + _epsilon) / (wn + _epsilon) );
 }

 /// Accept (or reject) classifier @a H for the current boosting round:
 /// recompute its alpha on the training set, optionally print statistics
 /// from the validation set, and — if alpha is positive — reweight both
 /// distributions with DiscreteAdaBoostCommon::Update.
 /// @param[in,out] H              classifier whose alpha is (re)computed
 /// @param[in,out] training_set   used to compute alpha; weights updated
 /// @param[in,out] validation_set used for statistics; weights updated
 /// @param[in]     verbose        print per-round statistics to std::cout
 /// @return true if H was accepted (alpha > 0), false otherwise
 template<class DataSetType>
 static bool Update(BoostableClassifier<WeakClassifier> & H, DataSetType & training_set, DataSetType & validation_set, bool verbose=true)
 {
 double bestAlpha1, bestZ1, bestW1;
 double bestAlpha, bestZ, bestW;

 // compute bestAlpha, bestZ, bestW
 // note: Evaluate overwrites set.templates[i].test
 Evaluate(training_set, H, bestAlpha1, bestZ1, bestW1); // re-compute stats and alpha
 Evaluate(validation_set, H, bestAlpha, bestZ, bestW); // compute stats

 // alpha should be the best alpha associated to training_set. Elsewhere could be negative.
 H.alpha = bestAlpha1; // as side effect, alpha is (always) recomputed here.

 if(verbose)
 {
 int cdp = 0;
 int cdn = 0;
 std::cout << "[best] V(T) ";

 if(H.debug_name() )
 std::cout << H.debug_name() << ' ';

 // validation-set stats first, training-set stats in parentheses
 std::cout << "Z:" << bestZ << "(" << bestZ1 << "), Alpha:" << bestAlpha << "(" << bestAlpha1 <<"), W:" << bestW << "(" << bestW1 << "). ";

 // count correct detections on the validation set, split by category
 // (category == 1 is treated as the positive class)
 for(typename DataSetType::ListType::const_iterator i = validation_set.templates.begin(); i != validation_set.templates.end(); ++i)
 {
 if( i->correct() )
 {
 if(i->category == 1)
 cdp++;
 else
 cdn++;
 }
 }
 // NOTE(review): integer percentage; divides by validation_set.Size() — assumes non-empty
 std::cout << cdp << "(+) " << cdn <<"(-) correct detection (" << ((cdp+cdn)*100)/validation_set.Size() << "%)\n";
 }

 if(H.alpha>0.0)
 {
 // update the weights (on both sets)
 DiscreteAdaBoostCommon::Update(training_set, H.alpha);
 DiscreteAdaBoostCommon::Update(validation_set, H.alpha);
 return true;
 }
 else
 {
 std::cerr << "Trying to use a negative classifier! alpha=" << H.alpha << std::endl;
 return false;
 }
 }

 /// Optimal AdaBoost weight for classifier @a c on dataset @a train.
 /// Side effect: overwrites templates[i].test (via Evaluate).
 /// @return alpha = 0.5*log((wp+eps)/(wn+eps))
 template<class Classifier, class DataSetType>
 static double ComputeAlpha(const Classifier & c, DataSetType & train)
 {
 double alpha, Z, wp;
 Evaluate(train, c, alpha, Z, wp);
 return alpha;
 }


};
279 
280 
281 #endif
static bool Update(BoostableClassifier< WeakClassifier > &H, DataSetType &training_set, DataSetType &validation_set, bool verbose=true)
Definition: DiscreteAdaBoost.h:217
static double Z(const DataSetType &set, float alpha)
Definition: DiscreteAdaBoost.h:97
a Voting Boostable classifier
static void Evaluate(DataSetType &set, const Classifier &h, double &wp, double &wn)
Definition: DiscreteAdaBoost.h:173
Definition: DiscreteAdaBoost.h:39
static double CalculateError(const DataSetType &set)
Definition: DiscreteAdaBoost.h:84
static void Update(DataSetType &set, double alpha)
Definition: DiscreteAdaBoost.h:45
static void Evaluate(DataSetType &set, const R &h, double &alpha, double &Z, double &wp)
Definition: DiscreteAdaBoost.h:194
static void UpdateWeights(DataSetType &train, double alpha)
Definition: DiscreteAdaBoost.h:110
float alpha
Weight associated to this classifier.
Definition: BoostableClassifier.h:43
static double ComputeW(const Classifier &c, DataSetType &train)
Definition: DiscreteAdaBoost.h:145
static double ComputeAlpha(const Classifier &c, DataSetType &train)
Definition: DiscreteAdaBoost.h:270
Definition: AdaBoost.h:32
Definition: BoostableClassifier.h:40