#include "TMVA/DNN/Architectures/Cpu.h"

#include <numeric> // std::accumulate
#include <cmath>   // exp, std::log

namespace TMVA {
namespace DNN {

//______________________________________________________________________________
template <typename AFloat>
AFloat TCpu<AFloat>::MeanSquaredError(const TCpuMatrix<AFloat> &Y, const TCpuMatrix<AFloat> &output,
                                      const TCpuMatrix<AFloat> &weights)
{
   const AFloat *dataY = Y.GetRawDataPointer();
   const AFloat *dataOutput = output.GetRawDataPointer();
   const AFloat *dataWeights = weights.GetRawDataPointer();
   std::vector<AFloat> temp(Y.GetNoElements());
   size_t m = Y.GetNrows();
   AFloat norm = 1.0 / ((AFloat) Y.GetNrows() * Y.GetNcols());

   // Worker: per-element weighted squared residual. Event weights are stored
   // one per row, and the matrices are column-major (element (i, j) sits at
   // index i + j * m), hence the workerID % m lookup for the weight.
   auto f = [&dataY, &dataOutput, &dataWeights, &temp, m](UInt_t workerID) {
      AFloat dy = dataY[workerID] - dataOutput[workerID];
      temp[workerID] = dataWeights[workerID % m] * dy * dy;
      return 0;
   };

   // Reduction: sum the per-element contributions.
   auto reduction = [](const std::vector<AFloat> &v) {
      return std::accumulate(v.begin(), v.end(), AFloat{});
   };

   Y.GetThreadExecutor().Map(f, ROOT::TSeqI(Y.GetNoElements()));
   return norm * reduction(temp);
}
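// Reference note (a reading of the code above, not text from the original
// file): the returned quantity is the event-weighted mean squared error
//
//    L(Y, output) = (1 / (m n)) * sum_{i=1..m, j=1..n} w_i (Y_ij - output_ij)^2
//
// where m = Y.GetNrows() counts events and n = Y.GetNcols() counts outputs.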
//______________________________________________________________________________
template <typename AFloat>
void TCpu<AFloat>::MeanSquaredErrorGradients(TCpuMatrix<AFloat> &dY, const TCpuMatrix<AFloat> &Y,
                                             const TCpuMatrix<AFloat> &output, const TCpuMatrix<AFloat> &weights)
{
   AFloat *dataDY = dY.GetRawDataPointer();
   const AFloat *dataY = Y.GetRawDataPointer();
   const AFloat *dataOutput = output.GetRawDataPointer();
   const AFloat *dataWeights = weights.GetRawDataPointer();

   size_t m = Y.GetNrows();
   AFloat norm = 1.0 / ((AFloat) Y.GetNrows() * Y.GetNcols());

   // Worker: derivative of the weighted mean squared error with respect to
   // the network output, one matrix element per task.
   auto f = [&dataDY, &dataY, &dataOutput, &dataWeights, m, norm](UInt_t workerID) {
      dataDY[workerID] = -2.0 * norm * (dataY[workerID] - dataOutput[workerID]);
      dataDY[workerID] *= dataWeights[workerID % m];
      return 0;
   };

   Y.GetThreadExecutor().Map(f, ROOT::TSeqI(Y.GetNoElements()));
}
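// Reference note (derived from the loss above, not text from the original
// file): differentiating the weighted MSE with respect to output_ij gives
//
//    dL/doutput_ij = -(2 / (m n)) * w_i * (Y_ij - output_ij),
//
// which is exactly what the worker writes into dY.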
//______________________________________________________________________________
template <typename AFloat>
AFloat TCpu<AFloat>::CrossEntropy(const TCpuMatrix<AFloat> &Y, const TCpuMatrix<AFloat> &output,
                                  const TCpuMatrix<AFloat> &weights)
{
   const AFloat *dataY = Y.GetRawDataPointer();
   const AFloat *dataOutput = output.GetRawDataPointer();
   const AFloat *dataWeights = weights.GetRawDataPointer();
   std::vector<AFloat> temp(Y.GetNoElements());

   size_t m = Y.GetNrows();
   AFloat norm = 1.0 / ((AFloat) Y.GetNrows() * Y.GetNcols());

   // Worker: per-element sigmoid cross entropy, evaluated directly on the
   // linear activation x. With sig(x) = 1 / (1 + exp(-x)) one has
   //    -log(sig(x))     = log(1 + exp(-x)) = lr
   //    -log(1 - sig(x)) = x + lr
   // so -y log(sig) - (1 - y) log(1 - sig) becomes y lr + (1 - y)(x + lr).
   auto f = [&dataY, &dataOutput, &dataWeights, &temp, m](UInt_t workerID) {
      AFloat y = dataY[workerID];
      AFloat x = dataOutput[workerID];
      AFloat lr = std::log(1. + exp(-x));
      temp[workerID] = y * lr + (1. - y) * (x + lr);
      temp[workerID] *= dataWeights[workerID % m];
      return 0;
   };

   auto reduction = [](const std::vector<AFloat> &v) {
      return std::accumulate(v.begin(), v.end(), AFloat{});
   };

   Y.GetThreadExecutor().Map(f, ROOT::TSeqI(Y.GetNoElements()));
   return norm * reduction(temp);
}
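// Reference note (an observation about the formulation, not text from the
// original file): evaluating -y log(sig(x)) - (1 - y) log(1 - sig(x))
// directly misbehaves for large |x|, where sig(x) rounds to exactly 0 or 1
// and the logarithm diverges. Rewriting both logarithms in terms of
// lr = log(1 + exp(-x)) avoids taking the log of a rounded sigmoid.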
//______________________________________________________________________________
template <typename AFloat>
void TCpu<AFloat>::CrossEntropyGradients(TCpuMatrix<AFloat> &dY, const TCpuMatrix<AFloat> &Y,
                                         const TCpuMatrix<AFloat> &output, const TCpuMatrix<AFloat> &weights)
{
   AFloat *dataDY = dY.GetRawDataPointer();
   const AFloat *dataY = Y.GetRawDataPointer();
   const AFloat *dataOutput = output.GetRawDataPointer();
   const AFloat *dataWeights = weights.GetRawDataPointer();

   size_t m = Y.GetNrows();
   AFloat norm = 1.0 / ((AFloat) Y.GetNrows() * Y.GetNcols());

   // Worker: the derivative of the sigmoid cross entropy with respect to the
   // linear activation x reduces to sig(x) - y.
   auto f = [&dataDY, &dataY, &dataOutput, &dataWeights, m, norm](UInt_t workerID) {
      AFloat y = dataY[workerID];
      AFloat sig = 1.0 / (1.0 + exp(-dataOutput[workerID]));
      dataDY[workerID] = norm * (sig - y);
      dataDY[workerID] *= dataWeights[workerID % m];
      return 0;
   };

   Y.GetThreadExecutor().Map(f, ROOT::TSeqI(Y.GetNoElements()));
}
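// Reference note (a short derivation, not text from the original file): with
// lr = log(1 + exp(-x)) one has dlr/dx = sig(x) - 1, so
//
//    d/dx [ y lr + (1 - y)(x + lr) ] = y (sig - 1) + (1 - y) sig = sig - y,
//
// which is the expression the worker evaluates, up to norm and the weight.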
//______________________________________________________________________________
template <typename AFloat>
AFloat TCpu<AFloat>::SoftmaxCrossEntropy(const TCpuMatrix<AFloat> &Y, const TCpuMatrix<AFloat> &output,
                                         const TCpuMatrix<AFloat> &weights)
{
   const AFloat *dataY = Y.GetRawDataPointer();
   const AFloat *dataOutput = output.GetRawDataPointer();
   const AFloat *dataWeights = weights.GetRawDataPointer();

   std::vector<AFloat> temp(Y.GetNrows());
   size_t m = Y.GetNrows();
   size_t n = Y.GetNcols();
   AFloat norm = 1.0 / ((AFloat) m);

   // Worker: one task per event (row). The first loop accumulates the softmax
   // normalization over the n output classes; temp is zero-initialized, so
   // the second loop accumulates the (negative) log-likelihood terms into it.
   auto f = [&dataY, &dataOutput, &dataWeights, &temp, m, n](UInt_t workerID) {
      AFloat sum = 0.0;
      for (size_t j = 0; j < n; j++) {
         sum += exp(dataOutput[workerID + j * m]);
      }
      for (size_t j = 0; j < n; j++) {
         temp[workerID] -=
            dataY[workerID + j * m] * std::log(exp(dataOutput[workerID + j * m]) / sum);
      }
      temp[workerID] *= dataWeights[workerID];
      return 0;
   };

   auto reduction = [](const std::vector<AFloat> &v) {
      return std::accumulate(v.begin(), v.end(), AFloat{});
   };

   Y.GetThreadExecutor().Map(f, ROOT::TSeqI(Y.GetNrows()));
   return norm * reduction(temp);
}
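// Reference note (a reading of the code above, not text from the original
// file): per event i the worker accumulates
//
//    L_i = -w_i * sum_j Y_ij * log( exp(output_ij) / sum_k exp(output_ik) ),
//
// i.e. the cross entropy between the targets and the softmax of the linear
// activations, and the result is averaged over the m events.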
//______________________________________________________________________________
template <typename AFloat>
void TCpu<AFloat>::SoftmaxCrossEntropyGradients(TCpuMatrix<AFloat> &dY, const TCpuMatrix<AFloat> &Y,
                                                const TCpuMatrix<AFloat> &output,
                                                const TCpuMatrix<AFloat> &weights)
{
   AFloat *dataDY = dY.GetRawDataPointer();
   const AFloat *dataY = Y.GetRawDataPointer();
   const AFloat *dataOutput = output.GetRawDataPointer();
   const AFloat *dataWeights = weights.GetRawDataPointer();

   size_t m = Y.GetNrows();
   size_t n = Y.GetNcols();
   AFloat norm = 1.0 / ((AFloat) m);

   // Worker: one task per event (row). The first loop accumulates the softmax
   // normalization and the sum of the targets; the second writes the gradient
   // for each of the n classes.
   auto f = [&dataDY, &dataY, &dataOutput, &dataWeights, m, n, norm](UInt_t workerID) {
      AFloat sum = 0.0;
      AFloat sumY = 0.0;
      AFloat weight = dataWeights[workerID];
      for (size_t j = 0; j < n; j++) {
         sum += exp(dataOutput[workerID + j * m]);
         sumY += dataY[workerID + j * m];
      }
      for (size_t j = 0; j < n; j++) {
         dataDY[workerID + j * m] =
            norm * (exp(dataOutput[workerID + j * m]) / sum * sumY - dataY[workerID + j * m]);
         dataDY[workerID + j * m] *= weight;
      }
      return 0;
   };

   Y.GetThreadExecutor().Map(f, ROOT::TSeqI(Y.GetNrows()));
}

} // namespace DNN
} // namespace TMVA
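// Reference note (a short derivation, not text from the original file): with
// p_j = exp(x_j) / sum_k exp(x_k) and s = sum_j Y_ij, the derivative of
// -sum_j Y_ij log(p_j) with respect to x_j is p_j * s - Y_ij, which matches
// the expression written into dY, scaled by 1/m and the event weight.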
Documented declarations (from the TCpu class reference; Matrix_t = TCpuMatrix<AFloat>, Scalar_t = AFloat):

static Scalar_t MeanSquaredError(const Matrix_t &Y, const Matrix_t &output, const Matrix_t &weights)
static void MeanSquaredErrorGradients(Matrix_t &dY, const Matrix_t &Y, const Matrix_t &output, const Matrix_t &weights)
static Scalar_t CrossEntropy(const Matrix_t &Y, const Matrix_t &output, const Matrix_t &weights)
   Sigmoid transformation is implicitly applied, thus output should hold the linear activations of the last layer in the net.
static void CrossEntropyGradients(Matrix_t &dY, const Matrix_t &Y, const Matrix_t &output, const Matrix_t &weights)
static Scalar_t SoftmaxCrossEntropy(const Matrix_t &Y, const Matrix_t &output, const Matrix_t &weights)
   Softmax transformation is implicitly applied, thus output should hold the linear activations of the last layer in the net.
static void SoftmaxCrossEntropyGradients(Matrix_t &dY, const Matrix_t &Y, const Matrix_t &output, const Matrix_t &weights)
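A minimal usage sketch (assumptions: a ROOT build with TMVA, a TCpuMatrix(nrows, ncols) constructor, and an operator()(i, j) accessor; the matrix-filling values are illustrative, not taken from this file):

#include "TMVA/DNN/Architectures/Cpu.h"
#include <cstdio>
using namespace TMVA::DNN;

int main()
{
   const size_t nEvents = 4, nClasses = 2;
   TCpuMatrix<Double_t> Y(nEvents, nClasses);      // one-hot targets
   TCpuMatrix<Double_t> output(nEvents, nClasses); // linear activations
   TCpuMatrix<Double_t> weights(nEvents, 1);       // one weight per event
   for (size_t i = 0; i < nEvents; i++) {
      weights(i, 0) = 1.0;
      for (size_t j = 0; j < nClasses; j++) {
         Y(i, j) = (j == i % nClasses) ? 1.0 : 0.0; // arbitrary labels
         output(i, j) = 0.1 * i - 0.2 * j;          // arbitrary activations
      }
   }

   Double_t mse = TCpu<Double_t>::MeanSquaredError(Y, output, weights);
   Double_t sce = TCpu<Double_t>::SoftmaxCrossEntropy(Y, output, weights);

   // Gradients are written into a matrix of the same shape as Y.
   TCpuMatrix<Double_t> dY(nEvents, nClasses);
   TCpu<Double_t>::SoftmaxCrossEntropyGradients(dY, Y, output, weights);

   printf("MSE = %f, softmax cross entropy = %f\n", mse, sce);
   return 0;
}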