Logo ROOT  
Reference Guide
 
Loading...
Searching...
No Matches
TMVA_SOFIE_Models.py File Reference

Detailed Description

View in nbviewer Open in SWAN
Example of inference with SOFIE using a set of models trained with Keras.

This tutorial shows how to store several models in a single header file and the weights in a ROOT binary file. The models are then evaluated using RDataFrame. First, generate the input model by running TMVA_Higgs_Classification.C.

This tutorial parses the input model and runs the inference using ROOT's JITing capability.

import os

import numpy as np
import ROOT
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
## generate and train Keras models with different architectures
def CreateModel(nlayers=4, nunits=64):
    """Build and compile a fully-connected Keras binary classifier.

    Parameters
    ----------
    nlayers : int
        Total number of hidden Dense layers.
    nunits : int
        Number of units in each hidden layer.

    Returns
    -------
    A compiled ``Sequential`` model with 7 inputs (the Higgs kinematic
    variables) and a single sigmoid output.
    """
    model = Sequential()
    # first hidden layer also fixes the input dimension (7 variables)
    model.add(Dense(nunits, activation='relu', input_dim=7))
    for i in range(1, nlayers):
        model.add(Dense(nunits, activation='relu'))
    # single sigmoid unit: signal-vs-background probability
    model.add(Dense(1, activation='sigmoid'))
    # fix: Adam was referenced but never imported (NameError at runtime);
    # it is now imported from tensorflow.keras.optimizers at the top of the file
    model.compile(loss='binary_crossentropy',
                  optimizer=Adam(learning_rate=0.001),
                  weighted_metrics=['accuracy'])
    return model
def PrepareData():
    """Load the Higgs signal and background trees and split into train/test sets.

    Returns
    -------
    (x_train, y_train, x_test, y_test) where x arrays have shape
    (nevents, 7) and y arrays hold 1 for signal, 0 for background.
    """
    variables = ['m_jj', 'm_jjj', 'm_lv', 'm_jlv', 'm_bb', 'm_wbb', 'm_wwbb']
    # get the input data
    inputFile = str(ROOT.gROOT.GetTutorialDir()) + "/machine_learning/data/Higgs_data.root"
    df1 = ROOT.RDataFrame("sig_tree", inputFile)
    sigData = df1.AsNumpy(columns=variables)
    # fix: stack the 7 per-variable numpy arrays into a single
    # (nevents x nvars) matrix — xsig/xbkg were used but never defined
    xsig = np.stack([sigData[v] for v in variables], axis=1)
    data_sig_size = xsig.shape[0]
    print("size of data", data_sig_size)
    # make SOFIE inference on background data
    df2 = ROOT.RDataFrame("bkg_tree", inputFile)
    bkgData = df2.AsNumpy(columns=variables)
    xbkg = np.stack([bkgData[v] for v in variables], axis=1)
    data_bkg_size = xbkg.shape[0]
    # labels: 1 = signal, 0 = background
    ysig = np.ones(data_sig_size)
    ybkg = np.zeros(data_bkg_size)
    inputs_data = np.concatenate((xsig, xbkg), axis=0)
    inputs_targets = np.concatenate((ysig, ybkg), axis=0)
    # split data in training and test data (50/50, fixed seed for reproducibility)
    x_train, x_test, y_train, y_test = train_test_split(
        inputs_data, inputs_targets, test_size=0.50, random_state=1234)
    return x_train, y_train, x_test, y_test
def TrainModel(model, x, y, name):
    """Fit *model* on (x, y), save it to '<name>.keras' and return that file name."""
    model.fit(x, y, epochs=5, batch_size=50)
    outFile = name + '.keras'
    model.save(outFile)
    return outFile
### run the models
x_train, y_train, x_test, y_test = PrepareData()
## create models and train them
# fix: all three models were built with CreateModel(4, 64) although their
# names encode distinct architectures (<nlayers>L_<nunits>); the parameters
# now match the names so the comparison between architectures is meaningful
model1 = TrainModel(CreateModel(4, 50), x_train, y_train, 'Higgs_Model_4L_50')
model2 = TrainModel(CreateModel(4, 200), x_train, y_train, 'Higgs_Model_4L_200')
model3 = TrainModel(CreateModel(2, 500), x_train, y_train, 'Higgs_Model_2L_500')
#evaluate with SOFIE the 3 trained models
def GenerateModelCode(modelFile, generatedHeaderFile):
    """Parse a saved Keras model with SOFIE and append its inference code to a header.

    Parameters
    ----------
    modelFile : str
        Path to the '.keras' file produced by TrainModel.
    generatedHeaderFile : str
        Header file the generated C++ inference code is appended to.

    Returns
    -------
    The header file name, for convenience.
    """
    # fix: 'model' was undefined — the SOFIE parsing step was missing
    # (the tutorial output shows "PyKeras: parsing model ...")
    model = ROOT.TMVA.Experimental.SOFIE.PyKeras.Parse(modelFile)
    print("Generating inference code for the Keras model from ", modelFile, "in the header ", generatedHeaderFile)
    # Generating inference code using a ROOT binary file for the weights;
    # pass True for the append flag so all models share one header
    model.OutputGenerated(generatedHeaderFile, True)
    # model.PrintGenerated()
    return generatedHeaderFile
# All models are appended to a single header and weight file, so any stale
# outputs from a previous run must be deleted before regenerating.
generatedHeaderFile = "Higgs_Model.hxx"
weightFile = "Higgs_Model.root"
for staleFile in (generatedHeaderFile, weightFile):
    if os.path.exists(staleFile):
        print("removing existing file", staleFile)
        os.remove(staleFile)

GenerateModelCode(model1, generatedHeaderFile)
GenerateModelCode(model2, generatedHeaderFile)
GenerateModelCode(model3, generatedHeaderFile)
#compile the generated code
# JIT-compile the single generated header so all three model classes exist
ROOT.gInterpreter.Declare('#include "' + generatedHeaderFile + '"')
#run the inference on the test data
# one SOFIE Session per model; all of them read their weights from the
# same ROOT binary file produced during code generation
session1 = ROOT.TMVA_SOFIE_Higgs_Model_4L_50.Session("Higgs_Model.root")
session2 = ROOT.TMVA_SOFIE_Higgs_Model_4L_200.Session("Higgs_Model.root")
session3 = ROOT.TMVA_SOFIE_Higgs_Model_2L_500.Session("Higgs_Model.root")
# score histograms in [0, 1]: hs* for true-signal events, hb* for background
hs1 = ROOT.TH1D("hs1","Signal result 4L 50",100,0,1)
hs2 = ROOT.TH1D("hs2","Signal result 4L 200",100,0,1)
hs3 = ROOT.TH1D("hs3","Signal result 2L 500",100,0,1)
hb1 = ROOT.TH1D("hb1","Background result 4L 50",100,0,1)
hb2 = ROOT.TH1D("hb2","Background result 4L 200",100,0,1)
hb3 = ROOT.TH1D("hb3","Background result 2L 500",100,0,1)
def EvalModel(session, x):
    """Run SOFIE inference on a single event *x* and return the scalar score."""
    return session.infer(x)[0]
# Score every test event with the three models and fill the matching
# histogram set: hs* when the true label is signal, hb* otherwise.
sessions = (session1, session2, session3)
sigHistos = (hs1, hs2, hs3)
bkgHistos = (hb1, hb2, hb3)
for event, label in zip(x_test, y_test):
    histos = sigHistos if label == 1 else bkgHistos
    for session, histo in zip(sessions, histos):
        histo.Fill(EvalModel(session, event))
def PlotHistos(hs, hb):
    """Draw the signal (red) and background (blue) score histograms on the current pad.

    Parameters
    ----------
    hs : TH1D with the signal scores.
    hb : TH1D with the background scores.
    """
    # fix: 'hs' was never drawn (so "same" had nothing to overlay on), and
    # SetLineColor takes an EColor/int constant, not the string "kBlue"
    hs.SetLineColor(ROOT.kRed)
    hs.Draw()
    hb.SetLineColor(ROOT.kBlue)
    hb.Draw("same")
# overlay signal vs background score distributions for each architecture
PlotHistos(hs1,hb1)
PlotHistos(hs2,hb2)
PlotHistos(hs3,hb3)
## draw also ROC curves
def GetContent(h):
    """Extract a 1D histogram's bin centers and contents as std::vector<float>.

    Parameters
    ----------
    h : a 1D ROOT histogram (e.g. TH1D).

    Returns
    -------
    (x, w) : std::vector<float> of bin centers and of bin contents.
    """
    # fix: 'n' was undefined — use the histogram's bin count
    n = h.GetNbinsX()
    x = ROOT.std.vector['float'](n)
    w = ROOT.std.vector['float'](n)
    for i in range(n):
        # ROOT bin 0 is the underflow, so visible bins start at 1
        x[i] = h.GetBinCenter(i + 1)
        w[i] = h.GetBinContent(i + 1)
    return x, w
def MakeROCCurve(hs, hb):
    """Build a TMVA ROC curve from signal and background score histograms.

    Prints the ROC integral and returns (roc, curve); both are returned so
    the ROCCurve object stays alive while its graph is in use.
    """
    sig_x, sig_w = GetContent(hs)
    bkg_x, bkg_w = GetContent(hb)
    roc = ROOT.TMVA.ROCCurve(sig_x, bkg_x, sig_w, bkg_w)
    print("ROC integral for ", hs.GetName(), roc.GetROCIntegral())
    return roc, roc.GetROCCurve()
# keep the ROCCurve objects (r1..r3) alive alongside their graphs, otherwise
# the returned curves would reference deleted C++ objects
r1,curve1 = MakeROCCurve(hs1,hb1)
r2,curve2 = MakeROCCurve(hs2,hb2)
r3,curve3 = MakeROCCurve(hs3,hb3)
ROOT::Detail::TRangeCast< T, true > TRangeDynCast
TRangeDynCast is an adapter class that allows the typed iteration through a TCollection.
ROOT's RDataFrame offers a modern, high-level interface for analysis of data stored in TTree ,...
size of data 10000
Model: "sequential"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓
┃ Layer (type) ┃ Output Shape ┃ Param # ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩
│ dense (Dense) │ (None, 64) │ 512 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ dense_1 (Dense) │ (None, 64) │ 4,160 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ dense_2 (Dense) │ (None, 64) │ 4,160 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ dense_3 (Dense) │ (None, 64) │ 4,160 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ dense_4 (Dense) │ (None, 1) │ 65 │
└─────────────────────────────────┴────────────────────────┴───────────────┘
Total params: 13,057 (51.00 KB)
Trainable params: 13,057 (51.00 KB)
Non-trainable params: 0 (0.00 B)
Epoch 1/5
␛[1m 1/200␛[0m ␛[37m━━━━━━━━━━━━━━━━━━━━␛[0m ␛[1m2:06␛[0m 634ms/step - accuracy: 0.6600 - loss: 0.6685␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈
␛[1m 64/200␛[0m ␛[32m━━━━━━␛[0m␛[37m━━━━━━━━━━━━━━␛[0m ␛[1m0s␛[0m 805us/step - accuracy: 0.5189 - loss: 0.6907 ␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈
␛[1m131/200␛[0m ␛[32m━━━━━━━━━━━━━␛[0m␛[37m━━━━━━━␛[0m ␛[1m0s␛[0m 780us/step - accuracy: 0.5421 - loss: 0.6828␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈
␛[1m200/200␛[0m ␛[32m━━━━━━━━━━━━━━━━━━━━␛[0m␛[37m␛[0m ␛[1m0s␛[0m 763us/step - accuracy: 0.5557 - loss: 0.6781␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈
␛[1m200/200␛[0m ␛[32m━━━━━━━━━━━━━━━━━━━━␛[0m␛[37m␛[0m ␛[1m1s␛[0m 816us/step - accuracy: 0.5893 - loss: 0.6659
Epoch 2/5
␛[1m 1/200␛[0m ␛[37m━━━━━━━━━━━━━━━━━━━━␛[0m ␛[1m2s␛[0m 11ms/step - accuracy: 0.5600 - loss: 0.6615␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈
␛[1m 70/200␛[0m ␛[32m━━━━━━━␛[0m␛[37m━━━━━━━━━━━━━␛[0m ␛[1m0s␛[0m 730us/step - accuracy: 0.6250 - loss: 0.6462␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈
␛[1m140/200␛[0m ␛[32m━━━━━━━━━━━━━━␛[0m␛[37m━━━━━━␛[0m ␛[1m0s␛[0m 723us/step - accuracy: 0.6306 - loss: 0.6439␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈
␛[1m200/200␛[0m ␛[32m━━━━━━━━━━━━━━━━━━━━␛[0m␛[37m␛[0m ␛[1m0s␛[0m 778us/step - accuracy: 0.6403 - loss: 0.6372
Epoch 3/5
␛[1m 1/200␛[0m ␛[37m━━━━━━━━━━━━━━━━━━━━␛[0m ␛[1m2s␛[0m 11ms/step - accuracy: 0.6600 - loss: 0.6174␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈
␛[1m 71/200␛[0m ␛[32m━━━━━━━␛[0m␛[37m━━━━━━━━━━━━━␛[0m ␛[1m0s␛[0m 723us/step - accuracy: 0.6487 - loss: 0.6213␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈
␛[1m144/200␛[0m ␛[32m━━━━━━━━━━━━━━␛[0m␛[37m━━━━━━␛[0m ␛[1m0s␛[0m 708us/step - accuracy: 0.6485 - loss: 0.6231␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈
␛[1m200/200␛[0m ␛[32m━━━━━━━━━━━━━━━━━━━━␛[0m␛[37m␛[0m ␛[1m0s␛[0m 756us/step - accuracy: 0.6462 - loss: 0.6280
Epoch 4/5
␛[1m 1/200␛[0m ␛[37m━━━━━━━━━━━━━━━━━━━━␛[0m ␛[1m2s␛[0m 11ms/step - accuracy: 0.6400 - loss: 0.6319␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈
␛[1m 73/200␛[0m ␛[32m━━━━━━━␛[0m␛[37m━━━━━━━━━━━━━␛[0m ␛[1m0s␛[0m 702us/step - accuracy: 0.6644 - loss: 0.6114␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈
␛[1m145/200␛[0m ␛[32m━━━━━━━━━━━━━━␛[0m␛[37m━━━━━━␛[0m ␛[1m0s␛[0m 701us/step - accuracy: 0.6613 - loss: 0.6128␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈
␛[1m200/200␛[0m ␛[32m━━━━━━━━━━━━━━━━━━━━␛[0m␛[37m␛[0m ␛[1m0s␛[0m 755us/step - accuracy: 0.6610 - loss: 0.6147
Epoch 5/5
␛[1m 1/200␛[0m ␛[37m━━━━━━━━━━━━━━━━━━━━␛[0m ␛[1m2s␛[0m 11ms/step - accuracy: 0.6200 - loss: 0.6216␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈
␛[1m 70/200␛[0m ␛[32m━━━━━━━␛[0m␛[37m━━━━━━━━━━━━━␛[0m ␛[1m0s␛[0m 725us/step - accuracy: 0.6550 - loss: 0.6131␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈
␛[1m140/200␛[0m ␛[32m━━━━━━━━━━━━━━␛[0m␛[37m━━━━━━␛[0m ␛[1m0s␛[0m 724us/step - accuracy: 0.6574 - loss: 0.6124␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈
␛[1m200/200␛[0m ␛[32m━━━━━━━━━━━━━━━━━━━━␛[0m␛[37m␛[0m ␛[1m0s␛[0m 764us/step - accuracy: 0.6616 - loss: 0.6102
Model: "sequential_1"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓
┃ Layer (type) ┃ Output Shape ┃ Param # ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩
│ dense_5 (Dense) │ (None, 64) │ 512 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ dense_6 (Dense) │ (None, 64) │ 4,160 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ dense_7 (Dense) │ (None, 64) │ 4,160 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ dense_8 (Dense) │ (None, 64) │ 4,160 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ dense_9 (Dense) │ (None, 1) │ 65 │
└─────────────────────────────────┴────────────────────────┴───────────────┘
Total params: 13,057 (51.00 KB)
Trainable params: 13,057 (51.00 KB)
Non-trainable params: 0 (0.00 B)
Epoch 1/5
␛[1m 1/200␛[0m ␛[37m━━━━━━━━━━━━━━━━━━━━␛[0m ␛[1m1:55␛[0m 582ms/step - accuracy: 0.4800 - loss: 0.7015␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈
␛[1m 61/200␛[0m ␛[32m━━━━━━␛[0m␛[37m━━━━━━━━━━━━━━␛[0m ␛[1m0s␛[0m 844us/step - accuracy: 0.5310 - loss: 0.6886 ␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈
␛[1m124/200␛[0m ␛[32m━━━━━━━━━━━━␛[0m␛[37m━━━━━━━━␛[0m ␛[1m0s␛[0m 819us/step - accuracy: 0.5501 - loss: 0.6824␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈
␛[1m193/200␛[0m ␛[32m━━━━━━━━━━━━━━━━━━━␛[0m␛[37m━␛[0m ␛[1m0s␛[0m 788us/step - accuracy: 0.5621 - loss: 0.6774␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈
␛[1m200/200␛[0m ␛[32m━━━━━━━━━━━━━━━━━━━━␛[0m␛[37m␛[0m ␛[1m1s␛[0m 833us/step - accuracy: 0.5907 - loss: 0.6645
Epoch 2/5
␛[1m 1/200␛[0m ␛[37m━━━━━━━━━━━━━━━━━━━━␛[0m ␛[1m2s␛[0m 11ms/step - accuracy: 0.5800 - loss: 0.6683␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈
␛[1m 70/200␛[0m ␛[32m━━━━━━━␛[0m␛[37m━━━━━━━━━━━━━␛[0m ␛[1m0s␛[0m 728us/step - accuracy: 0.6200 - loss: 0.6543␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈
␛[1m140/200␛[0m ␛[32m━━━━━━━━━━━━━━␛[0m␛[37m━━━━━━␛[0m ␛[1m0s␛[0m 727us/step - accuracy: 0.6265 - loss: 0.6498␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈
␛[1m200/200␛[0m ␛[32m━━━━━━━━━━━━━━━━━━━━␛[0m␛[37m␛[0m ␛[1m0s␛[0m 781us/step - accuracy: 0.6336 - loss: 0.6393
Epoch 3/5
␛[1m 1/200␛[0m ␛[37m━━━━━━━━━━━━━━━━━━━━␛[0m ␛[1m2s␛[0m 11ms/step - accuracy: 0.7400 - loss: 0.6108␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈
␛[1m 68/200␛[0m ␛[32m━━━━━━␛[0m␛[37m━━━━━━━━━━━━━━␛[0m ␛[1m0s␛[0m 747us/step - accuracy: 0.6449 - loss: 0.6336␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈
␛[1m135/200␛[0m ␛[32m━━━━━━━━━━━━━␛[0m␛[37m━━━━━━━␛[0m ␛[1m0s␛[0m 747us/step - accuracy: 0.6449 - loss: 0.6312␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈
␛[1m200/200␛[0m ␛[32m━━━━━━━━━━━━━━━━━━━━␛[0m␛[37m␛[0m ␛[1m0s␛[0m 798us/step - accuracy: 0.6501 - loss: 0.6241
Epoch 4/5
␛[1m 1/200␛[0m ␛[37m━━━━━━━━━━━━━━━━━━━━␛[0m ␛[1m2s␛[0m 11ms/step - accuracy: 0.6600 - loss: 0.6135␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈
␛[1m 70/200␛[0m ␛[32m━━━━━━━␛[0m␛[37m━━━━━━━━━━━━━␛[0m ␛[1m0s␛[0m 734us/step - accuracy: 0.6848 - loss: 0.6031␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈
␛[1m138/200␛[0m ␛[32m━━━━━━━━━━━━━␛[0m␛[37m━━━━━━━␛[0m ␛[1m0s␛[0m 736us/step - accuracy: 0.6753 - loss: 0.6077␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈
␛[1m200/200␛[0m ␛[32m━━━━━━━━━━━━━━━━━━━━␛[0m␛[37m␛[0m ␛[1m0s␛[0m 787us/step - accuracy: 0.6612 - loss: 0.6154
Epoch 5/5
␛[1m 1/200␛[0m ␛[37m━━━━━━━━━━━━━━━━━━━━␛[0m ␛[1m2s␛[0m 11ms/step - accuracy: 0.7400 - loss: 0.5404␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈
␛[1m 67/200␛[0m ␛[32m━━━━━━␛[0m␛[37m━━━━━━━━━━━━━━␛[0m ␛[1m0s␛[0m 766us/step - accuracy: 0.6567 - loss: 0.6151␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈
␛[1m134/200␛[0m ␛[32m━━━━━━━━━━━━━␛[0m␛[37m━━━━━━━␛[0m ␛[1m0s␛[0m 759us/step - accuracy: 0.6618 - loss: 0.6122␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈
␛[1m200/200␛[0m ␛[32m━━━━━━━━━━━━━━━━━━━━␛[0m␛[37m␛[0m ␛[1m0s␛[0m 804us/step - accuracy: 0.6641 - loss: 0.6076
Model: "sequential_2"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓
┃ Layer (type) ┃ Output Shape ┃ Param # ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩
│ dense_10 (Dense) │ (None, 64) │ 512 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ dense_11 (Dense) │ (None, 64) │ 4,160 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ dense_12 (Dense) │ (None, 64) │ 4,160 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ dense_13 (Dense) │ (None, 64) │ 4,160 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ dense_14 (Dense) │ (None, 1) │ 65 │
└─────────────────────────────────┴────────────────────────┴───────────────┘
Total params: 13,057 (51.00 KB)
Trainable params: 13,057 (51.00 KB)
Non-trainable params: 0 (0.00 B)
Epoch 1/5
␛[1m 1/200␛[0m ␛[37m━━━━━━━━━━━━━━━━━━━━␛[0m ␛[1m2:23␛[0m 722ms/step - accuracy: 0.5000 - loss: 0.7212␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈
␛[1m 62/200␛[0m ␛[32m━━━━━━␛[0m␛[37m━━━━━━━━━━━━━━␛[0m ␛[1m0s␛[0m 826us/step - accuracy: 0.5490 - loss: 0.6874 ␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈
␛[1m126/200␛[0m ␛[32m━━━━━━━━━━━━␛[0m␛[37m━━━━━━━━␛[0m ␛[1m0s␛[0m 805us/step - accuracy: 0.5568 - loss: 0.6813␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈
␛[1m192/200␛[0m ␛[32m━━━━━━━━━━━━━━━━━━━␛[0m␛[37m━␛[0m ␛[1m0s␛[0m 789us/step - accuracy: 0.5666 - loss: 0.6767␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈
␛[1m200/200␛[0m ␛[32m━━━━━━━━━━━━━━━━━━━━␛[0m␛[37m␛[0m ␛[1m1s␛[0m 835us/step - accuracy: 0.5933 - loss: 0.6636
Epoch 2/5
␛[1m 1/200␛[0m ␛[37m━━━━━━━━━━━━━━━━━━━━␛[0m ␛[1m2s␛[0m 11ms/step - accuracy: 0.6200 - loss: 0.6523␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈
␛[1m 66/200␛[0m ␛[32m━━━━━━␛[0m␛[37m━━━━━━━━━━━━━━␛[0m ␛[1m0s␛[0m 773us/step - accuracy: 0.6184 - loss: 0.6438␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈
␛[1m133/200␛[0m ␛[32m━━━━━━━━━━━━━␛[0m␛[37m━━━━━━━␛[0m ␛[1m0s␛[0m 765us/step - accuracy: 0.6260 - loss: 0.6419␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈
␛[1m198/200␛[0m ␛[32m━━━━━━━━━━━━━━━━━━━␛[0m␛[37m━␛[0m ␛[1m0s␛[0m 768us/step - accuracy: 0.6285 - loss: 0.6411␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈
␛[1m200/200␛[0m ␛[32m━━━━━━━━━━━━━━━━━━━━␛[0m␛[37m␛[0m ␛[1m0s␛[0m 817us/step - accuracy: 0.6361 - loss: 0.6377
Epoch 3/5
␛[1m 1/200␛[0m ␛[37m━━━━━━━━━━━━━━━━━━━━␛[0m ␛[1m2s␛[0m 11ms/step - accuracy: 0.7800 - loss: 0.5739␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈
␛[1m 67/200␛[0m ␛[32m━━━━━━␛[0m␛[37m━━━━━━━━━━━━━━␛[0m ␛[1m0s␛[0m 760us/step - accuracy: 0.6609 - loss: 0.6228␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈
␛[1m135/200␛[0m ␛[32m━━━━━━━━━━━━━␛[0m␛[37m━━━━━━━␛[0m ␛[1m0s␛[0m 749us/step - accuracy: 0.6573 - loss: 0.6248␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈
␛[1m200/200␛[0m ␛[32m━━━━━━━━━━━━━━━━━━━━␛[0m␛[37m␛[0m ␛[1m0s␛[0m 794us/step - accuracy: 0.6503 - loss: 0.6284
Epoch 4/5
␛[1m 1/200␛[0m ␛[37m━━━━━━━━━━━━━━━━━━━━␛[0m ␛[1m2s␛[0m 11ms/step - accuracy: 0.7000 - loss: 0.6128␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈
␛[1m 70/200␛[0m ␛[32m━━━━━━━␛[0m␛[37m━━━━━━━━━━━━━␛[0m ␛[1m0s␛[0m 728us/step - accuracy: 0.6661 - loss: 0.6173␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈
␛[1m137/200␛[0m ␛[32m━━━━━━━━━━━━━␛[0m␛[37m━━━━━━━␛[0m ␛[1m0s␛[0m 740us/step - accuracy: 0.6615 - loss: 0.6190␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈
␛[1m200/200␛[0m ␛[32m━━━━━━━━━━━━━━━━━━━━␛[0m␛[37m␛[0m ␛[1m0s␛[0m 794us/step - accuracy: 0.6576 - loss: 0.6178
Epoch 5/5
␛[1m 1/200␛[0m ␛[37m━━━━━━━━━━━━━━━━━━━━␛[0m ␛[1m2s␛[0m 11ms/step - accuracy: 0.6400 - loss: 0.6144␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈
␛[1m 66/200␛[0m ␛[32m━━━━━━␛[0m␛[37m━━━━━━━━━━━━━━␛[0m ␛[1m0s␛[0m 770us/step - accuracy: 0.6425 - loss: 0.6266␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈
␛[1m132/200␛[0m ␛[32m━━━━━━━━━━━━━␛[0m␛[37m━━━━━━━␛[0m ␛[1m0s␛[0m 769us/step - accuracy: 0.6470 - loss: 0.6239␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈
␛[1m198/200␛[0m ␛[32m━━━━━━━━━━━━━━━━━━━␛[0m␛[37m━␛[0m ␛[1m0s␛[0m 767us/step - accuracy: 0.6515 - loss: 0.6205␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈␈
␛[1m200/200␛[0m ␛[32m━━━━━━━━━━━━━━━━━━━━␛[0m␛[37m␛[0m ␛[1m0s␛[0m 816us/step - accuracy: 0.6632 - loss: 0.6113
PyKeras: parsing model Higgs_Model_4L_50.keras
Generating inference code for the Keras model from Higgs_Model_4L_50.keras in the header Higgs_Model.hxx
PyKeras: parsing model Higgs_Model_4L_200.keras
Generating inference code for the Keras model from Higgs_Model_4L_200.keras in the header Higgs_Model.hxx
PyKeras: parsing model Higgs_Model_2L_500.keras
Generating inference code for the Keras model from Higgs_Model_2L_500.keras in the header Higgs_Model.hxx
ROC integral for hs1 0.7370066525286233
ROC integral for hs2 0.7398785625917228
ROC integral for hs3 0.7346544987650812
Author
Lorenzo Moneta

Definition in file TMVA_SOFIE_Models.py.