LossFunctions.hxx
// @(#)root/tmva/tmva/dnn:$Id$
// Author: Simon Pfreundschuh 20/07/16

/*************************************************************************
 * Copyright (C) 2016, Simon Pfreundschuh                                *
 * All rights reserved.                                                  *
 *                                                                       *
 * For the licensing terms see $ROOTSYS/LICENSE.                         *
 * For the list of contributors see $ROOTSYS/README/CREDITS.             *
 *************************************************************************/

/////////////////////////////////////////////////////////////////////
// Implementation of the loss functions for the multi-threaded CPU //
// implementation using ROOT's TThreadExecutor and BLAS.            //
/////////////////////////////////////////////////////////////////////

#include "TMVA/DNN/Architectures/Cpu.h"

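// The matrices are stored contiguously in column-major order (see
// TCpuMatrix::GetRawDataPointer) with one event of the batch per row, so for
// the element-wise losses below the row index workerID % m (m = number of
// rows) recovers the event index used to look up the per-event weight.
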
namespace TMVA
{
namespace DNN
{

//______________________________________________________________________________
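// Weighted mean squared error: each worker computes w * (y - output)^2 for one
// matrix element, with the per-event weight selected through the row index
// (workerID % m); the partial results are summed via Reduce and normalised by
// the total number of elements.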
template <typename AFloat>
AFloat TCpu<AFloat>::MeanSquaredError(const TCpuMatrix<AFloat> &Y, const TCpuMatrix<AFloat> &output,
                                      const TCpuMatrix<AFloat> &weights)
{
   const AFloat *dataY = Y.GetRawDataPointer();
   const AFloat *dataOutput = output.GetRawDataPointer();
   const AFloat *dataWeights = weights.GetRawDataPointer();
   std::vector<AFloat> temp(Y.GetNoElements());
   size_t m = Y.GetNrows();
   AFloat norm = 1.0 / ((AFloat) Y.GetNrows() * Y.GetNcols());

   auto f = [&dataY, &dataOutput, &dataWeights, &temp, m](UInt_t workerID) {
      AFloat dy = dataY[workerID] - dataOutput[workerID];
      temp[workerID] = dataWeights[workerID % m] * dy * dy;
      return 0;
   };

   auto reduction = [](const std::vector<AFloat> &v) {
      return std::accumulate(v.begin(), v.end(), AFloat{});
   };

   Y.GetThreadExecutor().Map(f, ROOT::TSeqI(Y.GetNoElements()));
   return norm * Y.GetThreadExecutor().Reduce(temp, reduction);
}

//______________________________________________________________________________
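// Gradient of the weighted mean squared error with respect to the network
// output: dY = -2/(nrows*ncols) * w * (Y - output), evaluated element-wise in
// parallel.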
template <typename AFloat>
void TCpu<AFloat>::MeanSquaredErrorGradients(TCpuMatrix<AFloat> &dY, const TCpuMatrix<AFloat> &Y,
                                             const TCpuMatrix<AFloat> &output, const TCpuMatrix<AFloat> &weights)
{
   AFloat *dataDY = dY.GetRawDataPointer();
   const AFloat *dataY = Y.GetRawDataPointer();
   const AFloat *dataOutput = output.GetRawDataPointer();
   const AFloat *dataWeights = weights.GetRawDataPointer();

   size_t m = Y.GetNrows();
   AFloat norm = 1.0 / ((AFloat) Y.GetNrows() * Y.GetNcols());

   auto f = [&dataDY, &dataY, &dataOutput, &dataWeights, m, norm](UInt_t workerID) {
      dataDY[workerID] = -2.0 * norm * (dataY[workerID] - dataOutput[workerID]);
      dataDY[workerID] *= dataWeights[workerID % m];
      return 0;
   };

   Y.GetThreadExecutor().Map(f, ROOT::TSeqI(Y.GetNoElements()));
}

//______________________________________________________________________________
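// Weighted binary cross entropy. The sigmoid is applied implicitly, so output
// holds the linear (pre-sigmoid) activations; the y == 0 and y == 1 branches
// skip the logarithm whose coefficient vanishes.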
template <typename AFloat>
AFloat TCpu<AFloat>::CrossEntropy(const TCpuMatrix<AFloat> &Y, const TCpuMatrix<AFloat> &output,
                                  const TCpuMatrix<AFloat> &weights)
{
   const AFloat *dataY = Y.GetRawDataPointer();
   const AFloat *dataOutput = output.GetRawDataPointer();
   const AFloat *dataWeights = weights.GetRawDataPointer();
   std::vector<AFloat> temp(Y.GetNoElements());

   size_t m = Y.GetNrows();
   AFloat norm = 1.0 / ((AFloat) Y.GetNrows() * Y.GetNcols());

   auto f = [&dataY, &dataOutput, &dataWeights, &temp, m](UInt_t workerID) {
      AFloat y = dataY[workerID];
      AFloat sig = 1.0 / (1.0 + exp(-dataOutput[workerID]));
      if (y == 0)
         temp[workerID] = -log(1.0 - sig);
      else if (y == 1.)
         temp[workerID] = -log(sig);
      else
         temp[workerID] = -(y * log(sig) + (1.0 - y) * log(1.0 - sig));

      temp[workerID] *= dataWeights[workerID % m];
      return 0;
   };

   auto reduction = [](const std::vector<AFloat> &v) {
      return std::accumulate(v.begin(), v.end(), AFloat{});
   };

   Y.GetThreadExecutor().Map(f, ROOT::TSeqI(Y.GetNoElements()));
   return norm * Y.GetThreadExecutor().Reduce(temp, reduction);
}

//______________________________________________________________________________
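// Gradient of the weighted cross entropy with respect to the linear
// activations: dY = w/(nrows*ncols) * (sigmoid(output) - Y).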
template <typename AFloat>
void TCpu<AFloat>::CrossEntropyGradients(TCpuMatrix<AFloat> &dY, const TCpuMatrix<AFloat> &Y,
                                         const TCpuMatrix<AFloat> &output, const TCpuMatrix<AFloat> &weights)
{
   AFloat *dataDY = dY.GetRawDataPointer();
   const AFloat *dataY = Y.GetRawDataPointer();
   const AFloat *dataOutput = output.GetRawDataPointer();
   const AFloat *dataWeights = weights.GetRawDataPointer();

   size_t m = Y.GetNrows();
   AFloat norm = 1.0 / ((AFloat) Y.GetNrows() * Y.GetNcols());

   auto f = [&dataDY, &dataY, &dataOutput, &dataWeights, m, norm](UInt_t workerID) {
      AFloat y = dataY[workerID];
      AFloat sig = 1.0 / (1.0 + exp(-dataOutput[workerID]));
      dataDY[workerID] = norm * (sig - y);
      dataDY[workerID] *= dataWeights[workerID % m];
      return 0;
   };

   Y.GetThreadExecutor().Map(f, ROOT::TSeqI(Y.GetNoElements()));
}

//______________________________________________________________________________
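// Weighted softmax cross entropy, evaluated row by row (one worker per event):
// the softmax is applied implicitly to the linear activations in output, and
// the per-event terms -w_i * sum_j Y_ij * log(softmax(output_i)_j) are summed
// and normalised by the number of events.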
template <typename AFloat>
AFloat TCpu<AFloat>::SoftmaxCrossEntropy(const TCpuMatrix<AFloat> &Y, const TCpuMatrix<AFloat> &output,
                                         const TCpuMatrix<AFloat> &weights)
{
   const AFloat *dataY = Y.GetRawDataPointer();
   const AFloat *dataOutput = output.GetRawDataPointer();
   const AFloat *dataWeights = weights.GetRawDataPointer();

   std::vector<AFloat> temp(Y.GetNrows());
   size_t m = Y.GetNrows();
   size_t n = Y.GetNcols();
   AFloat norm = 1.0 / ((AFloat) m);

   auto f = [&dataY, &dataOutput, &dataWeights, &temp, n, m](UInt_t workerID) {
      AFloat sum = 0.0;
      for (size_t j = 0; j < n; j++) {
         sum += exp(dataOutput[workerID + j * m]);
      }
      for (size_t j = 0; j < n; j++) {
         temp[workerID] -=
            dataY[workerID + j * m] * log(exp(dataOutput[workerID + j * m]) / sum);
      }
      temp[workerID] *= dataWeights[workerID];
      return 0;
   };

   auto reduction = [](const std::vector<AFloat> &v) {
      return std::accumulate(v.begin(), v.end(), AFloat{});
   };

   Y.GetThreadExecutor().Map(f, ROOT::TSeqI(Y.GetNrows()));
   return norm * Y.GetThreadExecutor().Reduce(temp, reduction);
}

//______________________________________________________________________________
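// Gradient of the weighted softmax cross entropy with respect to the linear
// activations, one worker per event:
// dY_ij = w_i/m * (softmax(output_i)_j * sum_k Y_ik - Y_ij).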
template <typename AFloat>
void TCpu<AFloat>::SoftmaxCrossEntropyGradients(TCpuMatrix<AFloat> &dY, const TCpuMatrix<AFloat> &Y,
                                                const TCpuMatrix<AFloat> &output, const TCpuMatrix<AFloat> &weights)
{
   AFloat *dataDY = dY.GetRawDataPointer();
   const AFloat *dataY = Y.GetRawDataPointer();
   const AFloat *dataOutput = output.GetRawDataPointer();
   const AFloat *dataWeights = weights.GetRawDataPointer();

   size_t m = Y.GetNrows();
   size_t n = Y.GetNcols();
   AFloat norm = 1.0 / ((AFloat) m);

   auto f = [&dataDY, &dataY, &dataOutput, &dataWeights, norm, n, m](UInt_t workerID) {
      AFloat sum = 0.0;
      AFloat sumY = 0.0;
      AFloat weight = dataWeights[workerID];
      for (size_t j = 0; j < n; j++) {
         sum += exp(dataOutput[workerID + j * m]);
         sumY += dataY[workerID + j * m];
      }
      for (size_t j = 0; j < n; j++) {
         dataDY[workerID + j * m] =
            norm * (exp(dataOutput[workerID + j * m]) / sum * sumY - dataY[workerID + j * m]);
         dataDY[workerID + j * m] *= weight;
      }
      return 0;
   };

   Y.GetThreadExecutor().Map(f, ROOT::TSeqI(Y.GetNrows()));
}

} // namespace DNN
} // namespace TMVA