void MakeImagesTree(int n, int nh, int nw)
{
   // generate n images of size nh x nw for signal and background and store them in a ROOT file
   const int ntot = nh * nw;
   const TString fileOutName = TString::Format("images_data_%dx%d.root", nh, nw);

   const int nRndmEvts = 10000; // number of random events used to fill each image
   double delta_sigma = 0.1;    // difference in the Gaussian width between signal and background
   double pixelNoise = 5;

   double sX1 = 3;
   double sY1 = 3;
   double sX2 = sX1 + delta_sigma;
   double sY2 = sY1 - delta_sigma;

   auto h1 = new TH2D("h1", "h1", nh, 0, 10, nw, 0, 10);
   auto h2 = new TH2D("h2", "h2", nh, 0, 10, nw, 0, 10);

   auto f1 = new TF2("f1", "xygaus");
   auto f2 = new TF2("f2", "xygaus");
   TTree sgn("sig_tree", "signal_tree");
   TTree bkg("bkg_tree", "background_tree");

   TFile f(fileOutName, "RECREATE");

   std::vector<float> x1(ntot);
   std::vector<float> x2(ntot);

   std::vector<float> *px1 = &x1;
   std::vector<float> *px2 = &x2;

   bkg.Branch("vars", "std::vector<float>", &px1);
   sgn.Branch("vars", "std::vector<float>", &px2);

   f1->SetParameters(1, 5, sX1, 5, sY1);
   f2->SetParameters(1, 5, sX2, 5, sY2);
   gRandom->SetSeed(0);

   std::cout << "Filling ROOT tree " << std::endl;
   for (int i = 0; i < n; ++i) {
      if (i % 1000 == 0)
         std::cout << "Generating image event ... " << i << std::endl;

      h1->Reset();
      h2->Reset();
      // generate random means in the range [3,7] to keep the Gaussians away from the image borders
      f1->SetParameter(1, gRandom->Uniform(3, 7));
      f1->SetParameter(3, gRandom->Uniform(3, 7));
      f2->SetParameter(1, gRandom->Uniform(3, 7));
      f2->SetParameter(3, gRandom->Uniform(3, 7));

      h1->FillRandom("f1", nRndmEvts);
      h2->FillRandom("f2", nRndmEvts);

      for (int k = 0; k < nh; ++k) {
         for (int l = 0; l < nw; ++l) {
            int m = k * nw + l;
            // add Gaussian noise to each pixel
            x1[m] = h1->GetBinContent(k + 1, l + 1) + gRandom->Gaus(0, pixelNoise);
            x2[m] = h2->GetBinContent(k + 1, l + 1) + gRandom->Gaus(0, pixelNoise);
         }
      }
      sgn.Fill();
      bkg.Fill();
   }

   sgn.Write();
   bkg.Write();

   Info("MakeImagesTree", "Signal and background tree with images data written to the file %s", f.GetName());
   f.Close();
}
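
// MakeImagesTree produces two classes of nh x nw images: both are sampled from a 2D Gaussian
// ("xygaus") with random means, but their widths differ by delta_sigma in each direction, and
// every pixel is smeared with Gaussian noise of width pixelNoise. Each image is stored as a flat
// std::vector<float> in the "vars" branch of sig_tree / bkg_tree.
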
void TMVA_CNN_Classification(std::vector<bool> opt = {1, 1, 1, 1, 1})
{
   bool useTMVACNN = (opt.size() > 0) ? opt[0] : false;
   bool useKerasCNN = (opt.size() > 1) ? opt[1] : false;
   bool useTMVADNN = (opt.size() > 2) ? opt[2] : false;
   bool useTMVABDT = (opt.size() > 3) ? opt[3] : false;
   bool usePyTorchCNN = (opt.size() > 4) ? opt[4] : false;
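   // The entries of "opt" switch the individual classifiers on or off, in this order:
   //   opt[0] TMVA CNN, opt[1] Keras CNN (PyKeras), opt[2] TMVA DNN, opt[3] BDT, opt[4] PyTorch CNN.
   // For example, TMVA_CNN_Classification({1, 0, 0, 1}) books only the TMVA CNN and the BDT.
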
#ifndef R__HAS_TMVACPU
#ifndef R__HAS_TMVAGPU
   Warning("TMVA_CNN_Classification",
           "TMVA is not built with GPU or CPU multi-thread support. Cannot use TMVA Deep Learning for CNN");
   useTMVACNN = false;
#endif
#endif

   bool writeOutputFile = true;

   // enable ROOT implicit multi-threading (use a negative value to keep it disabled)
   int num_threads = 4;
   if (num_threads >= 0) {
      ROOT::EnableImplicitMT(num_threads);
   }

   TFile *outputFile = nullptr;
   if (writeOutputFile)
      outputFile = TFile::Open("TMVA_CNN_ClassificationOutput.root", "RECREATE");

   TMVA::Factory factory(
      "TMVA_CNN_Classification", outputFile,
      "!V:ROC:!Silent:Color:AnalysisType=Classification:Transformations=None:!Correlations");
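   // Factory options used above: !V (no verbose output), ROC (report the ROC integrals), !Silent,
   // Color (colored output), AnalysisType=Classification, Transformations=None (no input variable
   // transformation) and !Correlations (skip the correlation-matrix printout).
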
   int imgSize = 16 * 16;
   TString inputFileName = "images_data_16x16.root";

   // generate the image data set if the input file is not already present
   if (gSystem->AccessPathName(inputFileName)) {
      MakeImagesTree(5000, 16, 16);
   }

   auto inputFile = TFile::Open(inputFileName);
   if (!inputFile) {
      Error("TMVA_CNN_Classification", "Error opening input file %s - exit", inputFileName.Data());
      return;
   }

   TTree *signalTree = (TTree *)inputFile->Get("sig_tree");
   TTree *backgroundTree = (TTree *)inputFile->Get("bkg_tree");

   int nEventsSig = signalTree->GetEntries();
   int nEventsBkg = backgroundTree->GetEntries();

   // register the image pixels as an array of input variables and add the trees to the DataLoader
   TMVA::DataLoader *loader = new TMVA::DataLoader("dataset");
   loader->AddVariablesArray("vars", imgSize);
   loader->AddSignalTree(signalTree, 1.0);
   loader->AddBackgroundTree(backgroundTree, 1.0);

   // use 80% of the events for training, the remainder for testing
   int nTrainSig = 0.8 * nEventsSig;
   int nTrainBkg = 0.8 * nEventsBkg;

   TString prepareOptions = TString::Format(
      "nTrain_Signal=%d:nTrain_Background=%d:SplitMode=Random:SplitSeed=100:NormMode=NumEvents:!V:!CalcCorrelations",
      nTrainSig, nTrainBkg);
   loader->PrepareTrainingAndTestTree(TCut(""), prepareOptions);

   // book a Boosted Decision Tree as a reference method
   if (useTMVABDT) {
      factory.BookMethod(loader, TMVA::Types::kBDT, "BDT",
                         "!V:NTrees=400:MinNodeSize=2.5%:MaxDepth=2:BoostType=AdaBoost:AdaBoostBeta=0.5:"
                         "UseBaggedBoost:BaggedSampleFraction=0.5:SeparationType=GiniIndex:nCuts=20");
   }

   // book the TMVA fully-connected deep neural network (DNN)
   if (useTMVADNN) {
      TString layoutString(
         "Layout=DENSE|100|RELU,BNORM,DENSE|100|RELU,BNORM,DENSE|100|RELU,BNORM,DENSE|100|RELU,DENSE|1|LINEAR");
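      // Layout syntax: DENSE|<units>|<activation> adds a fully-connected layer and BNORM a batch
      // normalization layer; the network ends in a single linear output node (DENSE|1|LINEAR).
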
      TString trainingString1("LearningRate=1e-3,Momentum=0.9,Repetitions=1,"
                              "ConvergenceSteps=5,BatchSize=100,TestRepetitions=1,"
                              "MaxEpochs=20,WeightDecay=1e-4,Regularization=None,"
                              "Optimizer=ADAM,DropConfig=0.0+0.0+0.0+0.");

      TString trainingStrategyString("TrainingStrategy=");
      trainingStrategyString += trainingString1;

      TString dnnOptions("!H:V:ErrorStrategy=CROSSENTROPY:VarTransform=None:"
                         "WeightInitialization=XAVIER");
      dnnOptions.Append(":");
      dnnOptions.Append(layoutString);
      dnnOptions.Append(":");
      dnnOptions.Append(trainingStrategyString);

      TString dnnMethodName = "TMVA_DNN_CPU";
// use the GPU implementation if available, otherwise the multi-threaded CPU one
#ifdef R__HAS_TMVAGPU
      dnnOptions += ":Architecture=GPU";
      dnnMethodName = "TMVA_DNN_GPU";
#elif defined(R__HAS_TMVACPU)
      dnnOptions += ":Architecture=CPU";
      dnnMethodName = "TMVA_DNN_CPU";
#endif

      factory.BookMethod(loader, TMVA::Types::kDL, dnnMethodName, dnnOptions);
   }

   // book the TMVA convolutional neural network (CNN)
   if (useTMVACNN) {
      // the input is a single 16x16 image (depth 1)
      TString inputLayoutString("InputLayout=1|16|16");

      TString layoutString("Layout=CONV|10|3|3|1|1|1|1|RELU,BNORM,CONV|10|3|3|1|1|1|1|RELU,MAXPOOL|2|2|1|1,"
                           "RESHAPE|FLAT,DENSE|100|RELU,DENSE|1|LINEAR");
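      // CONV|10|3|3|1|1|1|1|RELU describes, field by field, a convolutional layer: 10 filters of
      // size 3x3, strides 1x1, zero-padding 1x1 and ReLU activation. MAXPOOL|2|2|1|1 is a 2x2 max
      // pooling with stride 1x1, and RESHAPE|FLAT flattens the tensor before the dense layers.
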
      TString trainingString1("LearningRate=1e-3,Momentum=0.9,Repetitions=1,"
                              "ConvergenceSteps=5,BatchSize=100,TestRepetitions=1,"
                              "MaxEpochs=20,WeightDecay=1e-4,Regularization=None,"
                              "Optimizer=ADAM,DropConfig=0.0+0.0+0.0+0.0");

      TString trainingStrategyString("TrainingStrategy=");
      trainingStrategyString += trainingString1;

      TString cnnOptions("!H:V:ErrorStrategy=CROSSENTROPY:VarTransform=None:"
                         "WeightInitialization=XAVIER");
      cnnOptions.Append(":");
      cnnOptions.Append(inputLayoutString);
      cnnOptions.Append(":");
      cnnOptions.Append(layoutString);
      cnnOptions.Append(":");
      cnnOptions.Append(trainingStrategyString);
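      // Unlike the plain DNN above, the CNN also needs the input tensor shape, so
      // InputLayout=1|16|16 (depth|height|width) is appended to the options together with the
      // layer layout and the training strategy.
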
      TString cnnMethodName = "TMVA_CNN_CPU";
// use the GPU implementation if available
#ifdef R__HAS_TMVAGPU
      cnnOptions += ":Architecture=GPU";
      cnnMethodName = "TMVA_CNN_GPU";
#else
      cnnOptions += ":Architecture=CPU";
      cnnMethodName = "TMVA_CNN_CPU";
#endif

      factory.BookMethod(loader, TMVA::Types::kDL, cnnMethodName, cnnOptions);
   }

#ifdef R__HAS_PYMVA
   // set up PyMVA (Python-based methods) and use the TensorFlow backend for Keras
   gSystem->Setenv("KERAS_BACKEND", "tensorflow");
   TMVA::PyMethodBase::PyInitialize();
#else
   useKerasCNN = false;
   usePyTorchCNN = false;
#endif

   // book a convolutional network in Keras through the PyKeras interface;
   // the Python script building the model is written and executed here
   if (useKerasCNN) {
      Info("TMVA_CNN_Classification", "Building convolutional keras model");

      TMacro m;
      m.AddLine("from tensorflow.keras.models import Sequential");
      m.AddLine("from tensorflow.keras.optimizers import Adam");
      m.AddLine("from tensorflow.keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D, Reshape, BatchNormalization");
      m.AddLine("model = Sequential() ");
      m.AddLine("model.add(Reshape((16, 16, 1), input_shape = (256, )))");
      m.AddLine("model.add(Conv2D(10, kernel_size = (3, 3), kernel_initializer = 'glorot_normal',activation = "
                "'relu', padding = 'same'))");
      m.AddLine("model.add(BatchNormalization())");
      m.AddLine("model.add(Conv2D(10, kernel_size = (3, 3), kernel_initializer = 'glorot_normal',activation = "
                "'relu', padding = 'same'))");
      m.AddLine("model.add(MaxPooling2D(pool_size = (2, 2), strides = (1,1))) ");
      m.AddLine("model.add(Flatten())");
      m.AddLine("model.add(Dense(256, activation = 'relu')) ");
      m.AddLine("model.add(Dense(2, activation = 'sigmoid')) ");
      m.AddLine("model.compile(loss = 'binary_crossentropy', optimizer = Adam(lr = 0.001), metrics = ['accuracy'])");
      m.AddLine("model.save('model_cnn.h5')");
      m.AddLine("model.summary()");

      m.SaveSource("make_cnn_model.py");
      // run the generated script to create the Keras model file
      gSystem->Exec("python make_cnn_model.py");

      if (gSystem->AccessPathName("model_cnn.h5")) {
         Warning("TMVA_CNN_Classification", "Error creating Keras model file - skip using Keras");
      } else {
         Info("TMVA_CNN_Classification", "Booking tf.Keras CNN model");
         factory.BookMethod(
            loader, TMVA::Types::kPyKeras, "PyKeras",
            "H:!V:VarTransform=None:FilenameModel=model_cnn.h5:tf.keras:"
            "FilenameTrainedModel=trained_model_cnn.h5:NumEpochs=20:BatchSize=100:"
            "GpuOptions=allow_growth=True");
      }
   }
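   // The generated make_cnn_model.py mirrors the TMVA CNN booked above: two 3x3 convolutions with
   // 10 filters (batch normalization in between), 2x2 max pooling, a flatten step and a dense
   // classifier head, saved to model_cnn.h5 for the PyKeras method.
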
   // book a convolutional network in PyTorch; the model is not generated inline but defined in a
   // separate Python script, referenced via the UserCode option
   if (usePyTorchCNN) {
      Info("TMVA_CNN_Classification", "Using Convolutional PyTorch Model");
      TString pyTorchFileName = gROOT->GetTutorialDir() + TString("/tmva/PyTorch_Generate_CNN_Model.py");
      // check that PyTorch can be imported and that the model-building script exists
      if (gSystem->Exec("python -c 'import torch'") || gSystem->AccessPathName(pyTorchFileName)) {
         Warning("TMVA_CNN_Classification",
                 "PyTorch is not installed or the model building file does not exist - skip using PyTorch");
      } else {
         Info("TMVA_CNN_Classification", "Booking PyTorch CNN model");
         TString methodOpt = "H:!V:VarTransform=None:FilenameModel=PyTorchModelCNN.pt:"
                             "FilenameTrainedModel=PyTorchTrainedModelCNN.pt:NumEpochs=20:BatchSize=100";
         methodOpt += TString(":UserCode=") + pyTorchFileName;
         factory.BookMethod(loader, TMVA::Types::kPyTorch, "PyTorch", methodOpt);
      }
   }

   factory.TrainAllMethods();
   factory.TestAllMethods();
   factory.EvaluateAllMethods();

   // plot the ROC curves of all booked methods and close the output file
   auto c1 = factory.GetROCCurve(loader);
   c1->Draw();
   if (outputFile)
      outputFile->Close();
}
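
// To run the tutorial with the default switches (assuming the macro is saved as
// TMVA_CNN_Classification.C):
//   root -l -q TMVA_CNN_Classification.C
// From the ROOT prompt, a subset of methods can be selected instead, e.g.
// TMVA_CNN_Classification({1, 0, 0, 1}).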