70 : fdoRegression(
doreg),
122 if (fKMatrix != 0) {
delete fKMatrix; fKMatrix = 0;}
136 std::vector<TMVA::SVEvent*>::iterator
idIter;
140 if((*idIter)->GetAlpha()>0)
160 if((
jevt->GetIdx()>=0) && (fB_low -
fErrorC_J > 2*fTolerance)) {
165 if((
jevt->GetIdx()<=0) && (
fErrorC_J - fB_up > 2*fTolerance)) {
172 if(
jevt->GetIdx()==0){
174 else ievt = fTEventUp;
187 std::vector<TMVA::SVEvent*>::iterator
idIter;
250 if ( gamma >= (
c_i -
c_j) )
257 if ( (
c_i -
c_j) >= gamma)
311 if((*idIter)->GetIdx()==0){
312 Float_t ii = fKMatrix->GetElement(
ievt->GetNs(), (*idIter)->GetNs());
313 Float_t jj = fKMatrix->GetElement(
jevt->GetNs(), (*idIter)->GetNs());
334 if((*idIter)->GetIdx()==0){
335 if((*idIter)->GetErrorCache()> fB_low){
336 fB_low = (*idIter)->GetErrorCache();
337 fTEventLow = (*idIter);
339 if( (*idIter)->GetErrorCache()< fB_up){
340 fB_up =(*idIter)->GetErrorCache();
341 fTEventUp = (*idIter);
348 if (
ievt->GetErrorCache() > fB_low) {
349 fB_low =
ievt->GetErrorCache();
353 fB_low =
jevt->GetErrorCache();
359 if (
ievt->GetErrorCache()< fB_low) {
360 fB_up =
ievt->GetErrorCache();
364 fB_up =
jevt->GetErrorCache() ;
375 if((fB_up > fB_low - 2*fTolerance))
return kTRUE;
392 std::vector<TMVA::SVEvent*>::iterator
idIter;
395 if (fIPyCurrentIter) *fIPyCurrentIter =
numit;
396 if (fExitFromTraining && *fExitFromTraining)
break;
406 if ((*idIter)->IsInI0()) {
427 <<
"Max number of iterations exceeded. "
428 <<
"Training may not be completed. Try use less Cost parameter" <<
Endl;
459 std::vector<TMVA::SVEvent*>::iterator
idIter;
460 if( fSupVec != 0) {
delete fSupVec; fSupVec = 0; }
461 fSupVec =
new std::vector<TMVA::SVEvent*>(0);
464 if((*idIter)->GetDeltaAlpha() !=0){
465 fSupVec->push_back((*
idIter));
476 std::vector<TMVA::SVEvent*>::iterator
idIter;
477 const Float_t epsilon = 0.001*fTolerance;
613 if( IsDiffSignificant(
b_alpha_i,
ievt->GetAlpha(), epsilon) ||
626 if((*idIter)->GetIdx()==0){
627 Float_t k_ii = fKMatrix->GetElement(
ievt->GetNs(), (*idIter)->GetNs());
628 Float_t k_jj = fKMatrix->GetElement(
jevt->GetNs(), (*idIter)->GetNs());
648 if((!(*idIter)->IsInI3()) && ((*idIter)->GetErrorCache()> fB_low)){
649 fB_low = (*idIter)->GetErrorCache();
650 fTEventLow = (*idIter);
653 if((!(*idIter)->IsInI2()) && ((*idIter)->GetErrorCache()< fB_up)){
654 fB_up =(*idIter)->GetErrorCache();
655 fTEventUp = (*idIter);
670 if(
jevt->IsInI0()) {
676 std::vector<TMVA::SVEvent*>::iterator
idIter;
ROOT::Detail::TRangeCast< T, true > TRangeDynCast
TRangeDynCast is an adapter class that allows typed iteration through a TCollection.
const_iterator begin() const
const_iterator end() const
An ostringstream-derived class used to redirect and format output.
Event class for Support Vector Machine.
Float_t GetTarget() const
Float_t GetCweight() const
Int_t GetTypeFlag() const
void SetErrorCache(Float_t err_cache)
Kernel for Support Vector Machine.
Kernel matrix for Support Vector Machine.
Float_t * GetLine(UInt_t)
returns a row of the kernel matrix
SVEvent * fTEventUp
last optimized event
Bool_t TakeStep(SVEvent *, SVEvent *)
void Train(UInt_t nIter=1000)
train the SVM
Bool_t IsDiffSignificant(Float_t, Float_t, Float_t)
Float_t fTolerance
documentation
Bool_t ExamineExample(SVEvent *)
Bool_t ExamineExampleReg(SVEvent *)
Bool_t fdoRegression
TODO temporary, find nicer solution.
Bool_t TakeStepReg(SVEvent *, SVEvent *)
Float_t fB_low
documentation
Float_t fB_up
documentation
SVEvent * fTEventLow
last optimized event
void SetIndex(TMVA::SVEvent *)
~SVWorkingSet()
destructor
SVWorkingSet()
constructor
std::vector< TMVA::SVEvent * > * fInputData
input events
SVKernelMatrix * fKMatrix
kernel matrix
std::vector< TMVA::SVEvent * > * GetSupportVectors()
Random number generator class based on M. Matsumoto and T. Nishimura's Mersenne Twister generator.
MsgLogger & Endl(MsgLogger &ml)
Short_t Max(Short_t a, Short_t b)
Returns the largest of a and b.
Short_t Min(Short_t a, Short_t b)
Returns the smallest of a and b.
Short_t Abs(Short_t d)
Returns the absolute value of parameter Short_t d.