#ifdef MethodMLP_UseMinuit__

   fUseRegulator(false), fCalculateErrors(false),
   fPrior(0.0), fPriorDev(0), fUpdateLimit(0),
   fTrainingMethod(kBFGS), fTrainMethodS("BFGS"),
   fSamplingFraction(1.0), fSamplingEpoch(0.0), fSamplingWeight(0.0),
   fSamplingTraining(false), fSamplingTesting(false),
   fLastAlpha(0.0), fTau(0.),
   fResetStep(0), fLearnRate(0.0), fDecayRate(0.0),
   fBPMode(kSequential), fBpModeS("None"),
   fBatchSize(0), fTestRate(0), fEpochMon(false),
   fGA_nsteps(0), fGA_preCalc(0), fGA_SC_steps(0),
   fGA_SC_rate(0), fGA_SC_factor(0.0),
   fDeviationsFromTargets(0),
#ifdef MethodMLP_UseMinuit__

      "Train with Back-Propagation (BP), BFGS Algorithm (BFGS), or Genetic Algorithm (GA - slower and worse)");

   DeclareOptionRef(fEpochMon = kFALSE, "EpochMonitoring",
      "Provide epoch-wise monitoring plots according to TestRate (caution: causes big ROOT output file!)");

   DeclareOptionRef(fSamplingEpoch = 1.0, "SamplingEpoch",
      "Sampling is used for the first 'SamplingEpoch' epochs; afterwards, all events are taken for training");
   DeclareOptionRef(fSamplingWeight = 1.0, "SamplingImportance",
      "The sampling weights of events in unsuccessful epochs (worse estimator than before) are multiplied by SamplingImportance, otherwise they are divided by it");
      "Back-propagation learning mode: sequential or batch");

      "Batch size: number of events/batch, only set if in Batch Mode, -1 for BatchSize=number_of_events");

      "Minimum improvement which counts as improvement (<0 means automatic convergence check is turned off)");

      "Number of steps (without improvement) required for convergence (<0 means automatic convergence check is turned off)");

      "Use regulator to avoid over-training");

      "Maximum number of regulator updates");

      "Calculates inverse Hessian matrix at the end of the training to be able to calculate the uncertainties of an MVA value");

      "Take the events for the estimator calculations from small deviations from the desired value to large deviations only over the weight range");
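// Illustration only (not part of MethodMLP.cxx): a minimal sketch of how the
// options declared above are typically set when booking the MLP method. The
// function name BookExampleMLP and the concrete option values are assumptions
// for the example, not recommended defaults.
#include "TMVA/Factory.h"
#include "TMVA/DataLoader.h"
#include "TMVA/Types.h"

void BookExampleMLP(TMVA::Factory* factory, TMVA::DataLoader* loader)
{
   // TrainingMethod, EpochMonitoring, SamplingEpoch and SamplingImportance
   // correspond to the DeclareOptionRef calls listed above.
   factory->BookMethod(loader, TMVA::Types::kMLP, "MLP",
                       "HiddenLayers=N+1,N:TrainingMethod=BFGS:TestRate=5:"
                       "EpochMonitoring=False:Sampling=0.3:SamplingEpoch=0.8:"
                       "SamplingImportance=1.0");
}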
   << "Will ignore negative events in training!"

   if (fBatchSize < 1 || fBatchSize > numEvents) fBatchSize = numEvents;

   Log() << kDEBUG << "Initialize learning rates" << Endl;

   for (Int_t i = 0; i < numSynapses; i++) {

   Log() << kFATAL << "<CalculateEstimator> fatal error: wrong tree type: " << treeType << Endl;

   histS = new TH1F( nameS, nameS, nbin, -limit, limit );
   histB = new TH1F( nameB, nameB, nbin, -limit, limit );

   for (Int_t i = 0; i < nEvents; i++) {

   for (UInt_t itgt = 0; itgt < nTgts; itgt++) {

   for (UInt_t icls = 0; icls < nClasses; icls++) {
      norm += exp( activationValue );
      d = exp( activationValue );

   for (UInt_t icls = 0; icls < nClasses; icls++) {
      Double_t desired = (icls==cls) ? 1.0 : 0.0;
      d = (desired-v)*(desired-v);
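// The multiclass branch above softmax-normalises the output-node activations
// and compares them to the one-hot target. Roughly (assuming v denotes the
// normalised activation of class c):
//
// \f[
//    v_c = \frac{e^{a_c}}{\sum_{c'} e^{a_{c'}}}, \qquad
//    d_c = \left(\delta_{c,\mathrm{cls}} - v_c\right)^2 ,
// \f]
//
// where cls is the true class of the event; the per-class squared deviations
// enter the estimator together with the event weight.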
   if (DataInfo().IsSignal(ev) && histS != 0) histS->Fill( float(v), float(w) );
   else if (histB != 0)                       histB->Fill( float(v), float(w) );

   float deviation = (*itDev).first;
   float devWeight = (*itDev).second;
   weightSum += devWeight;
   if (weightSum <= weightRangeCut) {
      estimator += devWeight*deviation;

   sumOfWeights = sumOfWeightsInRange;

   else estimator = estimator/Float_t(sumOfWeights);

   Log() << kFATAL << "ANN Network is not initialized, doing it now!" << Endl;

   Log() << kDEBUG << "reinitialize learning rates" << Endl;

   if (nSynapses > nEvents)
      Log() << kWARNING << "ANN too complicated: #events=" << nEvents << "\t#synapses=" << nSynapses << Endl;

   std::vector<TString> titles = { "Error on training set", "Error on test set" };
#ifdef MethodMLP_UseMinuit__
   if (useMinuit) MinuitMinimize();

   Log() << kINFO << "Finalizing handling of Regulator terms, trainE=" << trainE << " testE=" << testE << Endl;
   Log() << kINFO << "Done with handling of Regulator terms" << Endl;

   Int_t nWeights = nSynapses;
   for (Int_t i=0; i<nSynapses; i++) {

   std::vector<Double_t> buffer( nWeights );
   for (Int_t i=0; i<nWeights; i++) buffer[i] = 0.;

   TMatrixD Hessian ( nWeights, nWeights );

   Int_t RegUpdateTimes = 0;
   if (fSteps > 0)
      Log() << kINFO << "Inaccurate progress timing for MLP... " << Endl;

   for (Int_t i = 0; i < nEpochs; i++) {

   else SetDir( Hessian, Dir );

   Log() << kFATAL << "Line search failed! Huge troubles somewhere..." << Endl;

   if (dError < 0)
      Log() << kWARNING << "\nnegative dError=" << dError << Endl;

   if (fUseRegulator && RegUpdateTimes < fUpdateLimit && RegUpdateCD >= 5 && fabs(dError) < 0.1*AccuError) {
      Log() << kDEBUG << "\n\nUpdate regulators " << RegUpdateTimes << " on epoch " << i << "\tdError=" << dError << Endl;
   TString convText = Form( "<D^2> (train/test/epoch): %.4g/%.4g/%d", trainE, testE, i );

   if (progress2 > progress) progress = progress2;
   if (progress  < i)        progress = i;

   for (Int_t i=0; i<nSynapses; i++) {
      Gamma[IDX++][0] = -synapse->GetDEDw();

   for (Int_t i=0; i<nWeights; i++) Delta[i][0] = buffer[i];

   for (Int_t i=0; i<nSynapses; i++)
      Gamma[IDX++][0] += synapse->GetDEDw();
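// SetGammaDelta fills Gamma with the change of the gradient (new minus old
// dE/dw) and Delta with the last weight step; GetHessian then updates the
// running approximation of the inverse Hessian from them. For reference, the
// textbook BFGS update of an inverse-Hessian approximation H from a step
// delta and a gradient change gamma reads (the exact expression coded in
// GetHessian may differ in detail):
//
// \f[
//    H_{k+1} = H_k
//            + \left(1 + \frac{\gamma^{T} H_k\, \gamma}{\delta^{T}\gamma}\right)
//              \frac{\delta\,\delta^{T}}{\delta^{T}\gamma}
//            - \frac{\delta\,\gamma^{T} H_k + H_k\,\gamma\,\delta^{T}}{\delta^{T}\gamma}
// \f]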
   for (Int_t i=0; i<nSynapses; i++) {

   Int_t nPosEvents = nEvents;
   for (Int_t i=0; i<nEvents; i++) {

   for (Int_t j=0; j<nSynapses; j++) {

   for (Int_t i=0; i<nSynapses; i++) {
      synapse->SetDEDw( DEDw / nPosEvents );

   for (UInt_t itgt = 0; itgt < ntgt; itgt++) {

   for (UInt_t icls = 0; icls < nClasses; icls++) {
      Double_t desired = ( cls==icls ? 1.0 : 0.0 );

   for (Int_t i=0; i<nSynapses; i++) {
      Dir[IDX++][0] = -synapse->GetDEDw();

   for (Int_t i=0; i<nSynapses; i++) {
      DEDw[IDX++][0] = synapse->GetDEDw();

   dir = Hessian * DEDw;
   for (Int_t i=0; i<IDX; i++) dir[i][0] = -dir[i][0];

   for (Int_t i=0; i<nSynapses; i++) {
      Result += Dir[IDX++][0] * synapse->GetDEDw();
   Int_t nWeights = nSynapses;
   std::vector<Double_t> Origin(nWeights);
   for (Int_t i=0; i<nSynapses; i++) {

   if      (alpha2 < 0.01) alpha2 = 0.01;
   else if (alpha2 > 2.0)  alpha2 = 2.0;

   for (Int_t i=0; i<100; i++) {

   for (Int_t i=0; i<100; i++) {

   Log() << kWARNING << "linesearch, starting to investigate direction opposite of steepestDIR" << Endl;
   alpha2 = -alpha_original;

   Log() << kWARNING << "linesearch, failed even in opposite direction of steepestDIR" << Endl;

   if (alpha1 > 0 && alpha2 > 0 && alpha3 > 0) {
      (err3 - err1) / ((err3 - err2) / ( alpha3 - alpha2 )
                       - ( err2 - err1 ) / (alpha2 - alpha1 )));
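// The fLastAlpha expression just above is the minimum of the parabola through
// the three sampled points (alpha1,err1), (alpha2,err2), (alpha3,err3):
//
// \f[
//    \alpha^{*} = \frac{1}{2}\left[\alpha_1 + \alpha_3
//       - \frac{E_3 - E_1}{\dfrac{E_3 - E_2}{\alpha_3 - \alpha_2}
//                        - \dfrac{E_2 - E_1}{\alpha_2 - \alpha_1}}\right]
// \f]
//
// i.e. a standard parabolic line-search step along the current direction.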
   if (finalError > err1) {
      Log() << kWARNING << "Line search increased error! Something is wrong."
            << " fLastAlpha=" << fLastAlpha << " al123=" << alpha1 << " "
            << alpha2 << " " << alpha3 << " err1=" << err1
            << " errfinal=" << finalError << Endl;

   for (Int_t i=0; i<nSynapses; i++) {
      buffer[IDX] = synapse->GetWeight() - Origin[IDX];

   if (dError) (*dError) = (errOrigin - finalError)/finalError;
   for (Int_t i=0; i<nSynapses; i++) {
      synapse->SetWeight( Origin[IDX] + Dir[IDX][0] * alpha );

   for (Int_t i=0; i<nEvents; i++) {

   for (UInt_t itgt = 0; itgt < ntgts; itgt++) {

   for (UInt_t icls = 0, iclsEnd = DataInfo().GetNClasses(); icls < iclsEnd; icls++) {

   error = 0.5*(output-target)*(output-target);
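// The line above is the per-event mean-squared-error contribution used when
// the estimator is of MSE type:
//
// \f[
//    E_{\mathrm{MSE}} = \tfrac{1}{2}\,(y_{\mathrm{out}} - y_{\mathrm{target}})^2 ,
// \f]
//
// which is multiplied by the event weight before being summed into the
// training estimator.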
   if (fSteps > 0)
      Log() << kINFO << "Inaccurate progress timing for MLP... " << Endl;

   for (Int_t i = 0; i < nEpochs; i++) {

   if (lateEpoch > i) lateEpoch = i;

   TString convText = Form( "<D^2> (train/test): %.4g/%.4g", trainE, testE );

   for (Int_t i = 0; i < nEvents; i++) index[i] = i;

   for (Int_t i = 0; i < nEvents; i++) {

   for (Int_t i = 0; i < n; i++) {
      index[j] = index[i];
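// The index-swapping lines above belong to the event shuffling used when the
// events of an epoch are trained in random order. Illustration only: a
// self-contained sketch of an unbiased Fisher-Yates shuffle of the same index
// array (the use of TRandom3 and the function name are assumptions; the
// implementation in this file may differ in detail).
#include "TRandom3.h"

void ShuffleIndices(Int_t* index, Int_t n, TRandom3& rng)
{
   for (Int_t i = 0; i < n - 1; i++) {
      // draw j uniformly in [i, n)
      Int_t j = i + (Int_t)(rng.Rndm() * (n - i));
      if (j >= n) j = n - 1;      // protect against Rndm() returning exactly 1
      Int_t tmp = index[j];       // swap index[i] <-> index[j]
      index[j]  = index[i];
      index[i]  = tmp;
   }
}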
   for (Int_t i = 0; i < numSynapses; i++) {

   else Log() << kFATAL << "Estimator type unspecified!!" << Endl;

   error *= eventWeight;

   for (UInt_t i = 0, iEnd = desired.size(); i < iEnd; ++i) {

   for (UInt_t i = 0, iEnd = desired.size(); i < iEnd; ++i) {
      Double_t error = output - desired.at(i);
      error *= eventWeight;

   for (Int_t i = numLayers-1; i >= 0; i--) {

   for (Int_t j = 0; j < numNeurons; j++) {

   std::vector<Interval*> ranges;
   for (Int_t ivar=0; ivar< numWeights; ivar++) {

   Log() << kINFO << "GA: estimator after optimization: " << estimator << Endl;

   for (Int_t i = 0; i < numSynapses; i++) {

   for (Int_t i = 0; i < numLayers; i++) {

   for (Int_t j = 0; j < numNeurons; j++) {

   for (Int_t i = numLayers-1; i >= 0; i--) {

   for (Int_t j = 0; j < numNeurons; j++) {
   for (Int_t i=0; i<nSynapses; i++) {

   std::vector<Int_t> nWDP(numRegulators);
   std::vector<Double_t> trace(numRegulators), weightSum(numRegulators);
   for (int i=0; i<numSynapses; i++) {
      trace[idx] += InvH[i][i];

   for (int i=0; i<numRegulators; i++)
      fRegulators[i] = variance*nWDP[i]/(weightSum[i]+variance*trace[i]);

   Log() << kDEBUG << "\n" << "trainE:" << trainE << "\ttestE:" << testE << "\tvariance:" << variance << "\tgamma:" << gamma << Endl;
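// The regulator update above follows an evidence-style prescription: for each
// regulator class i (with N_i = nWDP[i] weights), assuming weightSum[i]
// accumulates the squared synapse weights of that class and trace[i] the
// corresponding partial trace of the approximate inverse Hessian,
//
// \f[
//    \alpha_i \;\leftarrow\;
//       \frac{\sigma^2\, N_i}{\sum_{k \in i} w_k^2 \;+\; \sigma^2\,\mathrm{Tr}_i\!\left(H^{-1}\right)} ,
// \f]
//
// with \f$\sigma^2\f$ the variance printed in the debug message above.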
   InvHessian.ResizeTo( numSynapses, numSynapses );

   for (Int_t i=0; i<nEvents; i++) {

   for (Int_t j = 0; j < numSynapses; j++) {
      sens[j][0] = sensT[0][j] = synapses->GetDelta();

   else if (fEstimator==kCE) InvHessian += (outputValue*(1-outputValue))*sens*sensT;

   for (Int_t i = 0; i < numSynapses; i++) {

   for (Int_t i = 0; i < numSynapses; i++) {
      InvHessian[i][i] += 1e-6;
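// GetApproxInvHessian builds the Hessian from per-event outer products of the
// synapse sensitivities s (GetDelta) before inverting it; for the CE estimator
// each term is scaled by y(1-y) as coded above, and the small diagonal term
// keeps the matrix invertible. Schematically (the handling of the MSE case and
// of the regulator terms is assumed from context):
//
// \f[
//    H \;\approx\; \sum_{\mathrm{events}} c_{\mathrm{ev}}\; \mathbf{s}\,\mathbf{s}^{T}
//        \;+\; 10^{-6}\, I ,
//    \qquad
//    c_{\mathrm{ev}} = \begin{cases} 1 & \text{MSE} \\ y\,(1-y) & \text{CE} \end{cases}
// \f]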
   Double_t MvaUpper, MvaLower, median, variance;

   for (Int_t i = 0; i < numSynapses; i++) {

   Log() << kWARNING << "Negative variance!!! median=" << median << "\tvariance(sigma^2)=" << variance << Endl;

   variance = sqrt(variance);

   *errUpper = MvaUpper - MvaValue;
   *errLower = MvaValue - MvaLower;
#ifdef MethodMLP_UseMinuit__

void TMVA::MethodMLP::MinuitMinimize()

   for (Int_t ipar=0; ipar < fNumberOfWeights; ipar++) {
      parName, w[ipar], 0.1, 0, 0 );

   tfitter->SetFCN( &IFCN );

   ((MethodMLP*)GetThisPtr())->FCN( npars, grad, f, fitPars, iflag );

   TTHREAD_TLS(Int_t)  nc   = 0;
   TTHREAD_TLS(double) minf = 1000000;

   for (Int_t ipar=0; ipar<fNumberOfWeights; ipar++) {

   if (f < minf) minf = f;
   for (Int_t ipar=0; ipar<fNumberOfWeights; ipar++)
      Log() << kDEBUG << fitPars[ipar] << " ";

   Log() << kDEBUG << "***** New estimator: " << f << " min: " << minf << " --> ncalls: " << nc << Endl;
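// Illustration only: a minimal, self-contained sketch of the TFitter/Minuit
// wiring that the optional MethodMLP_UseMinuit__ code path above relies on.
// The names MyFCN and MinuitSketch, the dummy estimator and the MIGRAD
// arguments are assumptions for the example, not copied from this file.
#include "TFitter.h"
#include "TString.h"

static void MyFCN(Int_t& npar, Double_t* grad, Double_t& f,
                  Double_t* pars, Int_t iflag)
{
   // evaluate the estimator for the current parameter (weight) vector;
   // here just a dummy quadratic so the example is runnable
   f = 0;
   for (Int_t i = 0; i < npar; i++) f += pars[i] * pars[i];
   (void)grad; (void)iflag;
}

void MinuitSketch(Int_t nPar)
{
   TFitter fitter(nPar);
   fitter.SetFCN(&MyFCN);
   for (Int_t ipar = 0; ipar < nPar; ipar++)
      fitter.SetParameter(ipar, Form("w%d", ipar), 0.0, 0.1, 0, 0);

   Double_t args[1] = { 500 };               // maximum number of iterations
   fitter.ExecuteCommand("MIGRAD", args, 1); // run the minimisation
}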
   Log() << col << "--- Short description:" << colres << Endl;
   Log() << "The MLP artificial neural network (ANN) is a traditional feed-" << Endl;
   Log() << "forward multilayer perceptron implementation. The MLP has a user-" << Endl;
   Log() << "defined hidden layer architecture, while the number of input (output)" << Endl;
   Log() << "nodes is determined by the input variables (output classes, i.e., " << Endl;
   Log() << "signal and one background). " << Endl;

   Log() << col << "--- Performance optimisation:" << colres << Endl;
   Log() << "Neural networks are stable and performing for a large variety of " << Endl;
   Log() << "linear and non-linear classification problems. However, in contrast" << Endl;
   Log() << "to (e.g.) boosted decision trees, the user is advised to reduce the " << Endl;
   Log() << "number of input variables that have only little discrimination power. " << Endl;
   Log() << "In the tests we have carried out so far, the MLP and ROOT networks" << Endl;
   Log() << "(TMlpANN, interfaced via TMVA) performed equally well, with however" << Endl;
   Log() << "a clear speed advantage for the MLP. The Clermont-Ferrand neural " << Endl;
   Log() << "net (CFMlpANN) exhibited worse classification performance in these" << Endl;
   Log() << "tests, which is partly due to the slow convergence of its training" << Endl;
   Log() << "(at least 10k training cycles are required to achieve approximately" << Endl;
   Log() << "competitive results)." << Endl;

   Log() << col << "Overtraining: " << colres
         << "only the TMlpANN performs an explicit separation of the" << Endl;
   Log() << "full training sample into independent training and validation samples." << Endl;
   Log() << "We have found that in most high-energy physics applications the " << Endl;
   Log() << "available degrees of freedom (training events) are sufficient to " << Endl;
   Log() << "constrain the weights of the relatively simple architectures required" << Endl;
   Log() << "to achieve good performance. Hence no overtraining should occur, and " << Endl;
   Log() << "the use of validation samples would only reduce the available training" << Endl;
   Log() << "information. However, if the performance on the training sample is " << Endl;
   Log() << "found to be significantly better than the one found with the inde-" << Endl;
   Log() << "pendent test sample, caution is needed. The results for these samples " << Endl;
   Log() << "are printed to standard output at the end of each training job." << Endl;

   Log() << col << "--- Performance tuning via configuration options:" << colres << Endl;
   Log() << "The hidden layer architecture for all ANNs is defined by the option" << Endl;
   Log() << "\"HiddenLayers=N+1,N,...\", where here the first hidden layer has N+1" << Endl;
   Log() << "neurons and the second N neurons (and so on), and where N is the number " << Endl;
   Log() << "of input variables. Excessive numbers of hidden layers should be avoided," << Endl;
   Log() << "in favour of more neurons in the first hidden layer." << Endl;
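// Worked instance (illustrative, values assumed): with N = 5 input variables
// the architecture string described above,
//
//    "HiddenLayers=N+1,N"
//
// builds two hidden layers of 6 and 5 neurons, e.g. as part of the booking
// option string passed to Factory::BookMethod.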
   Log() << "The number of cycles should be above 500. As said, if the number of" << Endl;
   Log() << "adjustable weights is small compared to the training sample size," << Endl;
   Log() << "using a large number of training samples should not lead to overtraining." << Endl;