#ifdef MethodMLP_UseMinuit__

   fUseRegulator(false), fCalculateErrors(false),
   fPrior(0.0), fPriorDev(0), fUpdateLimit(0),
   fTrainingMethod(kBFGS), fTrainMethodS("BFGS"),
   fSamplingFraction(1.0), fSamplingEpoch(0.0), fSamplingWeight(0.0),
   fSamplingTraining(false), fSamplingTesting(false),
   fLastAlpha(0.0), fTau(0.),
   fResetStep(0), fLearnRate(0.0), fDecayRate(0.0),
   fBPMode(kSequential), fBpModeS("None"),
   fBatchSize(0), fTestRate(0), fEpochMon(false),
   fGA_nsteps(0), fGA_preCalc(0), fGA_SC_steps(0),
   fGA_SC_rate(0), fGA_SC_factor(0.0),
   fDeviationsFromTargets(0),
#ifdef MethodMLP_UseMinuit__

      "Train with Back-Propagation (BP), BFGS Algorithm (BFGS), or Genetic Algorithm (GA - slower and worse)");

   DeclareOptionRef( fEpochMon = kFALSE, "EpochMonitoring",
      "Provide epoch-wise monitoring plots according to TestRate (caution: causes big ROOT output file!)" );

   DeclareOptionRef( fSamplingEpoch = 1.0, "SamplingEpoch",
      "Sampling is used for the first 'SamplingEpoch' epochs; afterwards, all events are taken for training" );

   DeclareOptionRef( fSamplingWeight = 1.0, "SamplingImportance",
      "The sampling weights of events in epochs which are successful (worse estimator than before) are multiplied by SamplingImportance, otherwise they are divided by it." );

      "Back-propagation learning mode: sequential or batch");

      "Batch size: number of events/batch, only set if in Batch Mode, -1 for BatchSize=number_of_events");

      "Minimum improvement which counts as improvement (<0 means automatic convergence check is turned off)");

      "Number of steps (without improvement) required for convergence (<0 means automatic convergence check is turned off)");

      "Use regulator to avoid over-training");

      "Maximum times of regulator update");

      "Calculates the inverse Hessian matrix at the end of the training to be able to calculate the uncertainties of an MVA value");

      "Take the events for the estimator calculations from small deviations from the desired value to large deviations only over the weight range");
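For orientation, these DeclareOptionRef keys are what a user concatenates into the method's option string when booking the MLP. A minimal sketch using the options named above (plus TrainingMethod, the key for the BP/BFGS/GA choice described first); the particular values are illustrative assumptions, not TMVA defaults:

   #include "TString.h"

   // hypothetical option string built from the option keys declared above
   // (values chosen for illustration only)
   const TString mlpOptions =
      "TrainingMethod=BFGS:EpochMonitoring=True:"
      "SamplingEpoch=0.8:SamplingImportance=1.0";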
      << "Will ignore negative events in training!"

   if (fBatchSize < 1 || fBatchSize > numEvents) fBatchSize = numEvents;

   for (Int_t i = 0; i < numSynapses; i++) {

   Log() << kFATAL << "<CalculateEstimator> fatal error: wrong tree type: " << treeType << Endl;

   histS = new TH1F( nameS, nameS, nbin, -limit, limit );
   histB = new TH1F( nameB, nameB, nbin, -limit, limit );

   for (UInt_t itgt = 0; itgt < nTgts; itgt++) {
   for (UInt_t icls = 0; icls < nClasses; icls++) {
      norm += exp( activationValue );
      d = exp( activationValue );

   for (UInt_t icls = 0; icls < nClasses; icls++) {
      Double_t desired = (icls==cls) ? 1.0 : 0.0;
      d = (desired-v)*(desired-v);

   if      (DataInfo().IsSignal(ev) && histS != 0) histS->Fill( float(v), float(w) );
   else if (histB != 0)                            histB->Fill( float(v), float(w) );

   float deviation = (*itDev).first;
   float devWeight = (*itDev).second;
   weightSum += devWeight;
   if( weightSum <= weightRangeCut ) {
      estimator += devWeight*deviation;

   sumOfWeights = sumOfWeightsInRange;

   else estimator = estimator/Float_t(sumOfWeights);
   Log() << kFATAL << "ANN Network is not initialized, doing it now!" << Endl;

   if (nSynapses>nEvents)
      Log() << kWARNING << "ANN too complicated: #events=" << nEvents << "\t#synapses=" << nSynapses << Endl;

   std::vector<TString> titles = { "Error on training set", "Error on test set" };

#ifdef MethodMLP_UseMinuit__
   if (useMinuit) MinuitMinimize();

   Log() << kINFO << "Finalizing handling of Regulator terms, trainE=" << trainE << " testE=" << testE << Endl;

   Log() << kINFO << "Done with handling of Regulator terms" << Endl;
   Int_t nWeights = nSynapses;

   for (Int_t i=0;i<nSynapses;i++) {

   std::vector<Double_t> buffer( nWeights );
   for (Int_t i=0;i<nWeights;i++) buffer[i] = 0.;

   TMatrixD Hessian ( nWeights, nWeights );

   Int_t RegUpdateTimes=0;

   for (Int_t i = 0; i < nEpochs; i++) {

   else SetDir( Hessian, Dir );

   Log() << kFATAL << "Line search failed! Huge troubles somewhere..." << Endl;

   if (fUseRegulator && RegUpdateTimes<fUpdateLimit && RegUpdateCD>=5 && fabs(dError)<0.1*AccuError) {
      Log() << kDEBUG << "\n\nUpdate regulators " << RegUpdateTimes << " on epoch " << i << "\tdError=" << dError << Endl;

   TString convText = Form( "<D^2> (train/test/epoch): %.4g/%.4g/%d", trainE, testE, i );

   if (progress2>progress) progress=progress2;

   if (progress<i) progress=i;
   for (Int_t i=0;i<nSynapses;i++) {
      Gamma[IDX++][0] = -synapse->GetDEDw();

   for (Int_t i=0;i<nWeights;i++) Delta[i][0] = buffer[i];

   for (Int_t i=0;i<nSynapses;i++)
      Gamma[IDX++][0] += synapse->GetDEDw();

   for (Int_t i=0;i<nSynapses;i++) {

   for (Int_t j=0;j<nSynapses;j++) {

   for (Int_t i=0;i<nSynapses;i++) {
      synapse->SetDEDw( DEDw / nPosEvents );

   for (UInt_t itgt = 0; itgt < ntgt; itgt++) {

   for (UInt_t icls = 0; icls < nClasses; icls++) {
      Double_t desired = ( cls==icls ? 1.0 : 0.0 );

   for (Int_t i=0;i<nSynapses;i++) {
      Dir[IDX++][0] = -synapse->GetDEDw();

   for (Int_t i=0;i<nSynapses;i++) {
      DEDw[IDX++][0] = synapse->GetDEDw();

   dir = Hessian * DEDw;
   for (Int_t i=0;i<IDX;i++) dir[i][0] = -dir[i][0];
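Taken together, the fragments above are the ingredients of a quasi-Newton (BFGS) step: Gamma accumulates the change of the gradient between iterations, Delta holds the last weight step (the buffer filled by the line search), and SetDir multiplies the matrix that plays the role of the approximate inverse Hessian by the gradient to obtain the search direction dir = -H*dEdw. For reference, the textbook BFGS update of the inverse Hessian from these two vectors reads (quoted as context, not as a transcription of GetHessian):

   H_{k+1} = H_k
           + \left(1 + \frac{\gamma^{T} H_k \gamma}{\delta^{T}\gamma}\right)\frac{\delta\,\delta^{T}}{\delta^{T}\gamma}
           - \frac{\delta\,\gamma^{T} H_k + H_k\,\gamma\,\delta^{T}}{\delta^{T}\gamma},
   \qquad \delta = w_{k+1}-w_k, \quad \gamma = \nabla E_{k+1}-\nabla E_k .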
   for (Int_t i=0;i<nSynapses;i++) {
      Result += Dir[IDX++][0] * synapse->GetDEDw();

   Int_t nWeights = nSynapses;

   std::vector<Double_t> Origin(nWeights);
   for (Int_t i=0;i<nSynapses;i++) {

   if      (alpha2 < 0.01) alpha2 = 0.01;
   else if (alpha2 > 2.0)  alpha2 = 2.0;

   for (Int_t i=0;i<100;i++) {

   for (Int_t i=0;i<100;i++) {

   Log() << kWARNING << "linesearch, starting to investigate direction opposite of steepestDIR" << Endl;
   alpha2 = -alpha_original;

   Log() << kWARNING << "linesearch, failed even in opposite direction of steepestDIR" << Endl;

   if (alpha1>0 && alpha2>0 && alpha3 > 0) {
      fLastAlpha = 0.5 * (alpha1 + alpha3 -
                          (err3 - err1) / ((err3 - err2) / ( alpha3 - alpha2 )
                                           - ( err2 - err1 ) / (alpha2 - alpha1 )));
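The closing expression of the line search is easier to read as a formula: given three trial step lengths alpha1, alpha2, alpha3 with corresponding errors err1, err2, err3, a parabola is fitted through the three points and its vertex is taken as the step length (fLastAlpha, which the warning below reports on failure):

   \alpha^{*} = \tfrac{1}{2}\left[\alpha_1+\alpha_3
              - \frac{e_3-e_1}{\dfrac{e_3-e_2}{\alpha_3-\alpha_2}-\dfrac{e_2-e_1}{\alpha_2-\alpha_1}}\right],

which is exactly the minimum of the quadratic interpolating (alpha1, e1), (alpha2, e2), (alpha3, e3).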
   if (finalError > err1) {
      Log() << kWARNING << "Line search increased error! Something is wrong."
            << "fLastAlpha=" << fLastAlpha << "al123=" << alpha1 << " "
            << alpha2 << " " << alpha3 << " err1=" << err1 << " errfinal=" << finalError << Endl;

   for (Int_t i=0;i<nSynapses;i++) {
      buffer[IDX] = synapse->GetWeight() - Origin[IDX];

   if (dError) (*dError)=(errOrigin-finalError)/finalError;

   for (Int_t i=0;i<nSynapses;i++) {
      synapse->SetWeight( Origin[IDX] + Dir[IDX][0] * alpha );
   for (UInt_t itgt = 0; itgt < ntgts; itgt++) {

   for( UInt_t icls = 0, iclsEnd = DataInfo().GetNClasses(); icls < iclsEnd; icls++ ){

   error = 0.5*(output-target)*(output-target);
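The last line is the per-event mean-squared-error contribution. The alternative cross-entropy estimator that appears further down (fEstimator==kCE) follows the usual definitions; the exact normalisation in TMVA's GetMSEErr/GetCEErr may differ, so read these as the generic forms:

   E_{\mathrm{MSE}} = \tfrac{1}{2}\,(o-t)^2, \qquad
   E_{\mathrm{CE}}  = -\bigl[\, t\,\ln o + (1-t)\,\ln(1-o) \,\bigr],

with o the network output and t the desired target (1 for signal, 0 for background in classification).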
   for (Int_t i = 0; i < nEpochs; i++) {

   if (lateEpoch > i) lateEpoch = i;

   TString convText = Form( "<D^2> (train/test): %.4g/%.4g", trainE, testE );

   for (Int_t i = 0; i < n; i++) {

      index[j] = index[i];

   for (Int_t i = 0; i < numSynapses; i++) {

   else Log() << kFATAL << "Estimator type unspecified!!" << Endl;

   error *= eventWeight;

   for (UInt_t i = 0, iEnd = desired.size(); i < iEnd; ++i) {

      error *= eventWeight;

   for (Int_t i = numLayers-1; i >= 0; i--) {

   for (Int_t j = 0; j < numNeurons; j++) {

   std::vector<Interval*> ranges;

   for (Int_t ivar=0; ivar< numWeights; ivar++) {

   Log() << kINFO << "GA: estimator after optimization: " << estimator << Endl;

   for (Int_t i = 0; i < numSynapses; i++) {

   for (Int_t i = 0; i < numLayers; i++) {

   for (Int_t j = 0; j < numNeurons; j++) {

   for (Int_t i = numLayers-1; i >= 0; i--) {

   for (Int_t j = 0; j < numNeurons; j++) {

   for (Int_t i=0;i<nSynapses;i++) {
   std::vector<Int_t> nWDP(numRegulators);
   std::vector<Double_t> trace(numRegulators), weightSum(numRegulators);
   for (int i=0;i<numSynapses;i++) {
      trace[idx]+=InvH[i][i];

   for (int i=0;i<numRegulators;i++)

      fRegulators[i]=variance*nWDP[i]/(weightSum[i]+variance*trace[i]);

   Log() << kDEBUG << "\n"
         << "trainE:" << trainE << "\ttestE:" << testE
         << "\tvariance:" << variance << "\tgamma:" << gamma << Endl;
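Reading the assignment above as a formula (variable meanings inferred from the surrounding code, so this is an interpretation rather than a derivation): for each regulator class i, with N_i the number of weights attached to it (nWDP), Tr_i(H^{-1}) its share of the trace of the approximate inverse Hessian, the summed squared weights (weightSum), and sigma^2 the code's variance, the update is

   \alpha_i^{\text{new}} = \frac{\sigma^{2}\, N_i}{\sum_{j\in i} w_j^{2} + \sigma^{2}\,\mathrm{Tr}_i\!\left(H^{-1}\right)},

which has the form of a MacKay-style evidence update for the weight-decay (regulator) strengths.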
   InvHessian.ResizeTo( numSynapses, numSynapses );

   for (Int_t j = 0; j < numSynapses; j++){
      sens[j][0]=sensT[0][j]=synapses->GetDelta();

   else if (fEstimator==kCE) InvHessian+=(outputValue*(1-outputValue))*sens*sensT;

   for (Int_t i = 0; i < numSynapses; i++){

   for (Int_t i = 0; i < numSynapses; i++){
      InvHessian[i][i]+=1e-6;
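What survives of the approximate inverse-Hessian calculation (GetApproxInvHessian) is enough to see its structure: the Hessian is approximated by an outer-product (Gauss-Newton-like) sum of per-event sensitivity vectors s (the synapse deltas), weighted by o(1-o) for the cross-entropy estimator; the regulator terms are presumably added to the diagonal together with the small 1e-6 term shown above, and the matrix is then inverted. Schematically (a sketch of the accumulation, not a literal transcription):

   H \approx \sum_{\text{events}} c_{\text{ev}}\; s\,s^{T} + \mathrm{diag}(\alpha) + 10^{-6}\,\mathbb{1},
   \qquad c_{\text{ev}} = o\,(1-o)\ \text{for the CE estimator},

and H^{-1} is what the CalculateErrors option uses below to attach an uncertainty band (errUpper/errLower) to the MVA value.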
   Double_t MvaUpper,MvaLower,median,variance;

   for (Int_t i = 0; i < numSynapses; i++){

   Log() << kWARNING << "Negative variance!!! median=" << median << "\tvariance(sigma^2)=" << variance << Endl;

   variance=sqrt(variance);

   *errUpper=MvaUpper-MvaValue;

   *errLower=MvaValue-MvaLower;
#ifdef MethodMLP_UseMinuit__

void TMVA::MethodMLP::MinuitMinimize()

   for (Int_t ipar=0; ipar < fNumberOfWeights; ipar++) {
         parName, w[ipar], 0.1, 0, 0 );

   tfitter->SetFCN( &IFCN );

//______________________________________________________________________________

   ((MethodMLP*)GetThisPtr())->FCN( npars, grad, f, fitPars, iflag );
   TTHREAD_TLS(Int_t) nc = 0;
   TTHREAD_TLS(double) minf = 1000000;

   for (Int_t ipar=0; ipar<fNumberOfWeights; ipar++) {

   if (f < minf) minf = f;
   for (Int_t ipar=0; ipar<fNumberOfWeights; ipar++)
      Log() << kDEBUG << fitPars[ipar] << " ";
   Log() << kDEBUG << "***** New estimator: " << f << " min: " << minf << " --> ncalls: " << nc << Endl;
   Log() << col << "--- Short description:" << colres << Endl;
   Log() << "The MLP artificial neural network (ANN) is a traditional feed-" << Endl;
   Log() << "forward multilayer perceptron implementation. The MLP has a user-" << Endl;
   Log() << "defined hidden layer architecture, while the number of input (output)" << Endl;
   Log() << "nodes is determined by the input variables (output classes, i.e., " << Endl;
   Log() << "signal and one background). " << Endl;
   Log() << col << "--- Performance optimisation:" << colres << Endl;
   Log() << "Neural networks are stable and performing for a large variety of " << Endl;
   Log() << "linear and non-linear classification problems. However, in contrast" << Endl;
   Log() << "to (e.g.) boosted decision trees, the user is advised to reduce the " << Endl;
   Log() << "number of input variables that have only little discrimination power. " << Endl;
   Log() << "In the tests we have carried out so far, the MLP and ROOT networks" << Endl;
   Log() << "(TMlpANN, interfaced via TMVA) performed equally well, with however" << Endl;
   Log() << "a clear speed advantage for the MLP. The Clermont-Ferrand neural " << Endl;
   Log() << "net (CFMlpANN) exhibited worse classification performance in these" << Endl;
   Log() << "tests, which is partly due to the slow convergence of its training" << Endl;
   Log() << "(at least 10k training cycles are required to achieve approximately" << Endl;
   Log() << "competitive results)." << Endl;
   Log() << col << "Overtraining: " << colres
         << "only the TMlpANN performs an explicit separation of the" << Endl;
   Log() << "full training sample into independent training and validation samples." << Endl;
   Log() << "We have found that in most high-energy physics applications the " << Endl;
   Log() << "available degrees of freedom (training events) are sufficient to " << Endl;
   Log() << "constrain the weights of the relatively simple architectures required" << Endl;
   Log() << "to achieve good performance. Hence no overtraining should occur, and " << Endl;
   Log() << "the use of validation samples would only reduce the available training" << Endl;
   Log() << "information. However, if the performance on the training sample is " << Endl;
   Log() << "found to be significantly better than the one found with the inde-" << Endl;
   Log() << "pendent test sample, caution is needed. The results for these samples " << Endl;
   Log() << "are printed to standard output at the end of each training job." << Endl;
   Log() << col << "--- Performance tuning via configuration options:" << colres << Endl;
   Log() << "The hidden layer architecture for all ANNs is defined by the option" << Endl;
   Log() << "\"HiddenLayers=N+1,N,...\", where the first hidden layer has N+1" << Endl;
   Log() << "neurons and the second N neurons (and so on), and where N is the number " << Endl;
   Log() << "of input variables. Excessive numbers of hidden layers should be avoided," << Endl;
   Log() << "in favour of more neurons in the first hidden layer." << Endl;
   Log() << "The number of cycles should be above 500. As said, if the number of" << Endl;
   Log() << "adjustable weights is small compared to the training sample size," << Endl;
   Log() << "using a large number of training samples should not lead to overtraining." << Endl;
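As a concrete illustration of this help text, the architecture and training options are passed as a single option string when the method is booked. A minimal sketch; the factory/dataloader objects and the specific values are assumptions for the example, not recommendations from this file:

   #include "TMVA/Factory.h"
   #include "TMVA/DataLoader.h"
   #include "TMVA/Types.h"

   // hypothetical helper booking the MLP with the options discussed above
   // (NeuronType/NCycles/TestRate values are illustrative, not defaults)
   void BookExampleMLP( TMVA::Factory* factory, TMVA::DataLoader* dataloader )
   {
      factory->BookMethod( dataloader, TMVA::Types::kMLP, "MLP",
                           "!H:!V:NeuronType=tanh:NCycles=600:HiddenLayers=N+1,N:"
                           "TrainingMethod=BFGS:TestRate=5:UseRegulator=True" );
   }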