TMVA_RNN_Classification.C
/// \file
/// \ingroup tutorial_tmva
/// \notebook
/// TMVA Classification Example Using a Recurrent Neural Network
///
/// This is an example of using an RNN in TMVA. We perform classification on a toy time-dependent
/// data set that is generated when running this example macro.
///
/// \macro_image
/// \macro_output
/// \macro_code
///
/// \author Lorenzo Moneta
/***

 # TMVA Classification Example Using a Recurrent Neural Network

 This is an example of using an RNN in TMVA.
 We perform the classification using a toy data set containing a time series of `ntime` samples,
 each of dimension `ndim`, generated by the provided function `MakeTimeData(nevents, ntime, ndim)`.
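
 For example, the macro can be run from the ROOT prompt as follows (a usage sketch;
 the arguments are the number of generated events per class and the network type):

 ~~~
 root -l
 .x TMVA_RNN_Classification.C(2000, 1)
 ~~~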
**/

#include <TROOT.h>

#include "TMVA/Factory.h"
#include "TMVA/DataLoader.h"
#include "TMVA/DataSetInfo.h"
#include "TMVA/Config.h"
#include "TMVA/MethodDL.h"
#ifdef R__HAS_PYMVA
#include "TMVA/PyMethodBase.h"
#endif

#include "TFile.h"
#include "TTree.h"
#include "TF1.h"
#include "TH1D.h"
#include "TCanvas.h"
#include "TCut.h"
#include "TMacro.h"
#include "TMath.h"
#include "TRandom.h"
#include "TSystem.h"

/// Helper function to generate the toy time-dependent data set:
/// for each event it produces `ntime` vectors of `ndim` values, obtained by
/// histogramming Gaussian samples whose mean and width are slowly modulated
/// as a function of the time step.
///
void MakeTimeData(int n, int ntime, int ndim)
{
   // n = number of events, ntime = number of time steps, ndim = number of values per time step
   TString fname = TString::Format("time_data_t%d_d%d.root", ntime, ndim);
   std::vector<TH1 *> v1(ntime);
   std::vector<TH1 *> v2(ntime);
   for (int i = 0; i < ntime; ++i) {
      v1[i] = new TH1D(TString::Format("h1_%d", i), "h1", ndim, 0, 10);
      v2[i] = new TH1D(TString::Format("h2_%d", i), "h2", ndim, 0, 10);
   }

   auto f1 = new TF1("f1", "gaus");
   auto f2 = new TF1("f2", "gaus");

   TFile f(fname, "RECREATE");
   TTree sgn("sgn", "sgn");
   TTree bkg("bkg", "bkg");

   std::vector<std::vector<float>> x1(ntime);
   std::vector<std::vector<float>> x2(ntime);

   for (int i = 0; i < ntime; ++i) {
      x1[i] = std::vector<float>(ndim);
      x2[i] = std::vector<float>(ndim);
   }

   for (auto i = 0; i < ntime; i++) {
      bkg.Branch(Form("vars_time%d", i), "std::vector<float>", &x1[i]);
      sgn.Branch(Form("vars_time%d", i), "std::vector<float>", &x2[i]);
   }
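   // each tree thus holds one std::vector<float> branch per time step
   // (vars_time0, vars_time1, ...), i.e. an ntime x ndim matrix per event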

   sgn.SetDirectory(&f);
   bkg.SetDirectory(&f);
   gRandom->SetSeed(0);

   std::vector<double> mean1(ntime);
   std::vector<double> mean2(ntime);
   std::vector<double> sigma1(ntime);
   std::vector<double> sigma2(ntime);
   for (int j = 0; j < ntime; ++j) {
      mean1[j] = 5. + 0.2 * sin(TMath::Pi() * j / double(ntime));
      mean2[j] = 5. + 0.2 * cos(TMath::Pi() * j / double(ntime));
      sigma1[j] = 4 + 0.3 * sin(TMath::Pi() * j / double(ntime));
      sigma2[j] = 4 + 0.3 * cos(TMath::Pi() * j / double(ntime));
   }
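   // the two classes differ only in the phase of the slow time modulation of the
   // Gaussian parameters: sine for the background sample, cosine for the signal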
   for (int i = 0; i < n; ++i) {

      if (i % 1000 == 0)
         std::cout << "Generating event ... " << i << std::endl;

      for (int j = 0; j < ntime; ++j) {
         auto h1 = v1[j];
         auto h2 = v2[j];
         h1->Reset();
         h2->Reset();

         f1->SetParameters(1, mean1[j], sigma1[j]);
         f2->SetParameters(1, mean2[j], sigma2[j]);

         h1->FillRandom("f1", 1000);
         h2->FillRandom("f2", 1000);

         for (int k = 0; k < ndim; ++k) {
            // add also a Gaussian noise term to each bin content
            x1[j][k] = h1->GetBinContent(k + 1) + gRandom->Gaus(0, 10);
            x2[j][k] = h2->GetBinContent(k + 1) + gRandom->Gaus(0, 10);
         }
      }
      sgn.Fill();
      bkg.Fill();

      if (n == 1) {
         // when generating a single event, draw its time evolution for the two classes
         auto c1 = new TCanvas();
         c1->Divide(ntime, 2);
         for (int j = 0; j < ntime; ++j) {
            c1->cd(j + 1);
            v1[j]->Draw();
         }
         for (int j = 0; j < ntime; ++j) {
            c1->cd(ntime + j + 1);
            v2[j]->Draw();
         }
         gPad->Update();
      }
   }
   if (n > 1) {
      sgn.Write();
      bkg.Write();
      sgn.Print();
      bkg.Print();
      f.Close();
   }
}
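
// Calling e.g. MakeTimeData(1, 10, 30) generates a single event and draws it
// instead of writing the output file (see the n == 1 branch above)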
/// Macro for performing a classification using a Recurrent Neural Network.
/// @param nevts number of events used for training and test (default 2000; increase for better classification results)
/// @param use_type network type:
///    - use_type = 0: use a simple RNN network
///    - use_type = 1: use an LSTM network
///    - use_type = 2: use a GRU network
///    - use_type = 3: build three different networks with RNN, LSTM and GRU

void TMVA_RNN_Classification(int nevts = 2000, int use_type = 1)
{
   const int ninput = 30;
   const int ntime = 10;
   const int batchSize = 100;
   const int maxepochs = 20;

   int nTotEvts = nevts; // total events to be generated for signal or background

   bool useKeras = true;

   bool useTMVA_RNN = true;
   bool useTMVA_DNN = true;
   bool useTMVA_BDT = false;

   std::vector<std::string> rnn_types = {"RNN", "LSTM", "GRU"};
   std::vector<bool> use_rnn_type = {1, 1, 1};
   if (use_type >= 0 && use_type < 3) {
      use_rnn_type = {0, 0, 0};
      use_rnn_type[use_type] = 1;
   }
   bool useGPU = true; // use GPU for TMVA if available

#ifndef R__HAS_TMVAGPU
   useGPU = false;
#ifndef R__HAS_TMVACPU
   Warning("TMVA_RNN_Classification",
           "TMVA is not built with GPU or CPU multi-thread support. Cannot use the TMVA deep learning RNN");
   useTMVA_RNN = false;
#endif
#endif
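
   // the value of useGPU determines the "Architecture" option (GPU or CPU)
   // passed to the TMVA deep learning methods booked below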

   TString archString = (useGPU) ? "GPU" : "CPU";

   bool writeOutputFile = true;

#ifdef R__HAS_PYMVA
   // initialize the Python interpreter for PyMVA / PyKeras
   TMVA::PyMethodBase::PyInitialize();
#else
   useKeras = false;
#endif

#ifdef R__USE_IMT
   int num_threads = 4; // use max 4 threads
   // switch off MT in OpenBLAS to avoid conflict with tbb
   gSystem->Setenv("OMP_NUM_THREADS", "1");

   // enable multi-threading in ROOT
   if (num_threads >= 0) {
      ROOT::EnableImplicitMT(num_threads);
   }
#endif

   TMVA::Config::Instance();

   std::cout << "Running with nthreads = " << ROOT::GetThreadPoolSize() << std::endl;

   TString inputFileName = "time_data_t10_d30.root";

   bool fileExist = !gSystem->AccessPathName(inputFileName);

   // if the file does not exist, create it
   if (!fileExist) {
      MakeTimeData(nTotEvts, ntime, ninput);
   }

   auto inputFile = TFile::Open(inputFileName);
   if (!inputFile) {
      Error("TMVA_RNN_Classification", "Error opening input file %s - exit", inputFileName.Data());
      return;
   }

   std::cout << "--- RNNClassification : Using input file: " << inputFile->GetName() << std::endl;

   // Create a ROOT output file where TMVA will store ntuples, histograms, etc.
   TString outfileName(TString::Format("data_RNN_%s.root", archString.Data()));
   TFile *outputFile = nullptr;
   if (writeOutputFile)
      outputFile = TFile::Open(outfileName, "RECREATE");

   /**
    ## Declare Factory

    Create the Factory class. Later you can choose the methods
    whose performance you'd like to investigate.

    The factory is the major TMVA object you have to interact with. Here is the list of parameters
    you need to pass:

    - The first argument is the base of the name of all the output
      weight files in the directory weight/ that will be created with the
      method parameters.

    - The second argument is the output file for the training results.

    - The third argument is a string option defining some general configuration for the TMVA session.
      For example, all TMVA output can be suppressed by removing the "!" (not) in front of the "Silent"
      argument in the option string.

   **/
253
254 // Creating the factory object
255 TMVA::Factory *factory = new TMVA::Factory("TMVAClassification", outputFile,
256 "!V:!Silent:Color:DrawProgressBar:Transformations=None:!Correlations:"
257 "AnalysisType=Classification:ModelPersistence");
258 TMVA::DataLoader *dataloader = new TMVA::DataLoader("dataset");
259
260 TTree *signalTree = (TTree *)inputFile->Get("sgn");
261 TTree *background = (TTree *)inputFile->Get("bkg");
262

   const int nvar = ninput * ntime;

   /// add variables - use the new AddVariablesArray function
   for (auto i = 0; i < ntime; i++) {
      dataloader->AddVariablesArray(Form("vars_time%d", i), ninput);
   }
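   // this declares ntime arrays of ninput variables each, i.e.
   // nvar = ntime * ninput = 300 input variables per event in total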

   dataloader->AddSignalTree(signalTree, 1.0);
   dataloader->AddBackgroundTree(background, 1.0);

   // check given input
   auto &datainfo = dataloader->GetDataSetInfo();
   auto vars = datainfo.GetListOfVariables();
   std::cout << "number of variables is " << vars.size() << std::endl;
   for (auto &v : vars)
      std::cout << v << ",";
   std::cout << std::endl;

   int nTrainSig = 0.8 * nTotEvts;
   int nTrainBkg = 0.8 * nTotEvts;

   // build the string options for DataLoader::PrepareTrainingAndTestTree
   TString prepareOptions = TString::Format(
      "nTrain_Signal=%d:nTrain_Background=%d:SplitMode=Random:SplitSeed=100:NormMode=NumEvents:!V:!CalcCorrelations",
      nTrainSig, nTrainBkg);
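   // with the default nevts = 2000, 1600 signal and 1600 background events are
   // used for training and the remaining 400 + 400 events form the test sample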

   // Apply additional cuts on the signal and background samples (can be different)
   TCut mycuts = ""; // for example: TCut mycuts = "abs(var1)<0.5 && abs(var2-0.5)<1";
   TCut mycutb = "";

   dataloader->PrepareTrainingAndTestTree(mycuts, mycutb, prepareOptions);

   std::cout << "prepared DATA LOADER " << std::endl;

   /**
    ## Book TMVA recurrent models

    Book the different types of recurrent models in TMVA (SimpleRNN, LSTM or GRU).

   **/

   if (useTMVA_RNN) {

      for (int i = 0; i < 3; ++i) {

         if (!use_rnn_type[i])
            continue;

         const char *rnn_type = rnn_types[i].c_str();

         /// define the InputLayout string for the RNN:
         /// the input data are organized as time steps x input dimension
         TString inputLayoutString = TString::Format("InputLayout=%d|%d", ntime, ninput);

         /// Define the RNN layer layout:
         /// LayerType (RNN, LSTM or GRU) | number of units | number of inputs | time steps |
         /// remember output (typically no = 0) | return full output sequence
         TString rnnLayout = TString::Format("%s|10|%d|%d|0|1", rnn_type, ninput, ntime);

         /// add after the RNN a reshape layer (needed to flatten the output), a dense layer with
         /// 64 units and a final linear layer.
         /// Note the last layer is linear because, when using CrossEntropy, a Sigmoid is applied already
         TString layoutString = TString("Layout=") + rnnLayout + TString(",RESHAPE|FLAT,DENSE|64|TANH,LINEAR");
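         // e.g. for the LSTM case with ntime = 10 and ninput = 30 this expands to:
         // Layout=LSTM|10|30|10|0|1,RESHAPE|FLAT,DENSE|64|TANH,LINEAR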

         /// Define the training strategy. Different training strings can be concatenated;
         /// here only one is used
         TString trainingString1 = TString::Format("LearningRate=1e-3,Momentum=0.0,Repetitions=1,"
                                                   "ConvergenceSteps=5,BatchSize=%d,TestRepetitions=1,"
                                                   "WeightDecay=1e-2,Regularization=None,MaxEpochs=%d,"
                                                   "Optimizer=ADAM,DropConfig=0.0+0.+0.+0.",
                                                   batchSize, maxepochs);

         TString trainingStrategyString("TrainingStrategy=");
         trainingStrategyString += trainingString1; // + "|" + trainingString2

         /// Define the full RNN option string, adding the final options for the whole network
         TString rnnOptions("!H:V:ErrorStrategy=CROSSENTROPY:VarTransform=None:"
                            "WeightInitialization=XAVIERUNIFORM:ValidationSize=0.2:RandomSeed=1234");

         rnnOptions.Append(":");
         rnnOptions.Append(inputLayoutString);
         rnnOptions.Append(":");
         rnnOptions.Append(layoutString);
         rnnOptions.Append(":");
         rnnOptions.Append(trainingStrategyString);
         rnnOptions.Append(":");
         rnnOptions.Append(TString::Format("Architecture=%s", archString.Data()));

         TString rnnName = "TMVA_" + TString(rnn_type);
         factory->BookMethod(dataloader, TMVA::Types::kDL, rnnName, rnnOptions);
      }
   }

   /**
    ## Book TMVA fully connected dense layer models

   **/

   if (useTMVA_DNN) {
      // Method DL with a dense-layer (fully connected) architecture
      TString inputLayoutString = TString::Format("InputLayout=1|1|%d", ntime * ninput);

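      // InputLayout=1|1|300 treats the 10 x 30 time series as one flat
      // 300-dimensional input vector, ignoring the time structure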
      TString layoutString("Layout=DENSE|64|TANH,DENSE|64|TANH,DENSE|64|TANH,LINEAR");
      // Training strategies
      TString trainingString1("LearningRate=1e-3,Momentum=0.0,Repetitions=1,"
                              "ConvergenceSteps=10,BatchSize=256,TestRepetitions=1,"
                              "WeightDecay=1e-4,Regularization=None,MaxEpochs=20,"
                              "DropConfig=0.0+0.+0.+0.,Optimizer=ADAM");
      TString trainingStrategyString("TrainingStrategy=");
      trainingStrategyString += trainingString1; // + "|" + trainingString2

      // General options
      TString dnnOptions("!H:V:ErrorStrategy=CROSSENTROPY:VarTransform=None:"
                         "WeightInitialization=XAVIER:RandomSeed=0");

      dnnOptions.Append(":");
      dnnOptions.Append(inputLayoutString);
      dnnOptions.Append(":");
      dnnOptions.Append(layoutString);
      dnnOptions.Append(":");
      dnnOptions.Append(trainingStrategyString);
      dnnOptions.Append(":");
      dnnOptions.Append(TString::Format("Architecture=%s", archString.Data()));

      TString dnnName = "TMVA_DNN";
      factory->BookMethod(dataloader, TMVA::Types::kDL, dnnName, dnnOptions);
   }

   /**
    ## Book Keras recurrent models

    Book the different types of recurrent models in Keras (SimpleRNN, LSTM or GRU).

   **/

   if (useKeras) {

      for (int i = 0; i < 3; i++) {

         if (use_rnn_type[i]) {

            TString modelName = TString::Format("model_%s.h5", rnn_types[i].c_str());
            TString trainedModelName = TString::Format("trained_model_%s.h5", rnn_types[i].c_str());

            Info("TMVA_RNN_Classification", "Building recurrent keras model using a %s layer", rnn_types[i].c_str());
            // create a python script which builds and saves the Keras model:
            // a reshape layer + a recurrent layer + two dense layers
            TMacro m;
            m.AddLine("import tensorflow");
            m.AddLine("from tensorflow.keras.models import Sequential");
            m.AddLine("from tensorflow.keras.optimizers import Adam");
            m.AddLine("from tensorflow.keras.layers import Input, Dense, Dropout, Flatten, SimpleRNN, GRU, LSTM, Reshape, "
                      "BatchNormalization");
            m.AddLine("");
            m.AddLine("model = Sequential() ");
            m.AddLine("model.add(Reshape((10, 30), input_shape = (10*30, )))");
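            // the Reshape layer restores the (ntime, ninput) = (10, 30) structure
            // from the flat 300-element input vector that TMVA passes to Keras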
            // add the recurrent layer depending on the type; return the full output sequence
            if (rnn_types[i] == "LSTM")
               m.AddLine("model.add(LSTM(units=10, return_sequences=True) )");
            else if (rnn_types[i] == "GRU")
               m.AddLine("model.add(GRU(units=10, return_sequences=True) )");
            else
               m.AddLine("model.add(SimpleRNN(units=10, return_sequences=True) )");

            // m.AddLine("model.add(BatchNormalization())");
            m.AddLine("model.add(Flatten())"); // needed when returning the full time output sequence
            m.AddLine("model.add(Dense(64, activation = 'tanh')) ");
            m.AddLine("model.add(Dense(2, activation = 'sigmoid')) ");
            m.AddLine(
               "model.compile(loss = 'binary_crossentropy', optimizer = Adam(learning_rate = 0.001), weighted_metrics = ['accuracy'])");
            m.AddLine(TString::Format("modelName = '%s'", modelName.Data()));
            m.AddLine("model.save(modelName)");
            m.AddLine("model.summary()");

            m.SaveSource("make_rnn_model.py");
            // execute the python script to create the model file
            auto ret = (TString *)gROOT->ProcessLine("TMVA::Python_Executable()");
            TString python_exe = (ret) ? *(ret) : "python";
            gSystem->Exec(python_exe + " make_rnn_model.py");

            if (gSystem->AccessPathName(modelName)) {
               Warning("TMVA_RNN_Classification", "Error creating Keras recurrent model file - skip using Keras");
               useKeras = false;
            } else {
               // book the PyKeras method only if the Keras model could be created
               Info("TMVA_RNN_Classification", "Booking Keras %s model", rnn_types[i].c_str());
               factory->BookMethod(dataloader, TMVA::Types::kPyKeras,
                                   TString::Format("PyKeras_%s", rnn_types[i].c_str()),
                                   TString::Format("!H:!V:VarTransform=None:FilenameModel=%s:tf.keras:"
                                                   "FilenameTrainedModel=%s:GpuOptions=allow_growth=True:"
                                                   "NumEpochs=%d:BatchSize=%d",
                                                   modelName.Data(), trainedModelName.Data(), maxepochs, batchSize));
            }
         }
      }
   }

   // use a BDT as reference in case Keras or the TMVA DL methods are not used
   if (!useKeras || !useTMVA_RNN)
      useTMVA_BDT = true;

   /**
    ## Book TMVA BDT
   **/

   if (useTMVA_BDT) {

      factory->BookMethod(dataloader, TMVA::Types::kBDT, "BDTG",
                          "!H:!V:NTrees=100:MinNodeSize=2.5%:BoostType=Grad:Shrinkage=0.10:UseBaggedBoost:"
                          "BaggedSampleFraction=0.5:nCuts=20:"
                          "MaxDepth=2");
   }

   /// Train all methods
   factory->TrainAllMethods();

   std::cout << "nthreads = " << ROOT::GetThreadPoolSize() << std::endl;

   // ---- Evaluate all MVAs using the set of test events
   factory->TestAllMethods();

   // ----- Evaluate and compare performance of all configured MVAs
   factory->EvaluateAllMethods();

   // plot the ROC curve
   auto c1 = factory->GetROCCurve(dataloader);
   c1->Draw();

   if (outputFile)
      outputFile->Close();
}