Logo ROOT  
Reference Guide
ActivationFunctions.hxx
Go to the documentation of this file.
// @(#)root/tmva/tmva/dnn:$Id$
// Author: Simon Pfreundschuh 19/07/16

/*************************************************************************
 * Copyright (C) 2016, Simon Pfreundschuh                                *
 * All rights reserved.                                                  *
 *                                                                       *
 * For the licensing terms see $ROOTSYS/LICENSE.                         *
 * For the list of contributors see $ROOTSYS/README/CREDITS.             *
 *************************************************************************/

///////////////////////////////////////////////////////////////////
// Implementation of the activation functions for multi-threaded //
// CPU architectures using Roots TThreadExecutor and BLAS.       //
///////////////////////////////////////////////////////////////////

18#include <math.h>
19
20#ifdef R__HAS_VDT
21#include "vdt/tanh.h"
22#define TANH_IMPL_X vdt::fast_tanhf(x)
23#else
24#define TANH_IMPL_X tanh(x)
25#endif
26
27
namespace TMVA
{
namespace DNN
{

33//______________________________________________________________________________
34template<typename AFloat>
36 const ActivationDescriptor_t /* activationDescr */,
37 const double /* coef */, const AFloat /*alpha */, const AFloat /*beta*/)
38{
39 // scaling and translation is not yet implemented
40 TMVA::DNN::evaluate<TCpu<AFloat>>( X, activFunct);
41}
42//______________________________________________________________________________
43template<typename AFloat>
45 const Tensor_t & dY, const Tensor_t & X,
46 EActivationFunction activFunct,
47 const ActivationDescriptor_t /* activationDescr */,
48 const AFloat /* alpha */, const AFloat /* beta */)
49{
50 // scaling and translation not yet implemented
51 // output tensor (Y) could also be used to speed up derivative calculation
52 // compute dx = f'(x)
53 TMVA::DNN::evaluateDerivative<TCpu<AFloat>>(dX, activFunct, X);
54 // Compute element-wise product. dx = f'(x) * dY
55 Hadamard(dX, dY);
56}
57//______________________________________________________________________________
58template<typename AFloat>
60 const TCpuTensor<AFloat> &/*A*/)
61{
62 auto f = [](AFloat) {return 1.0;};
63 B.Map(f);
64}
65
66//______________________________________________________________________________
67template<typename AFloat>
69{
70 auto f = [](AFloat x) {return (x < 0.0) ? 0.0 : x;};
71 B.Map(f);
72}
73
74//______________________________________________________________________________
75template<typename AFloat>
77 const TCpuTensor<AFloat> &A)
78{
79 auto f = [](AFloat x) {return (x < 0.0) ? 0.0 : 1.0;};
80 B.MapFrom(f, A);
81}
82
83//______________________________________________________________________________
84template<typename AFloat>
86{
87 auto f = [](AFloat x) {return 1.0 / (1.0 + exp(-x));};
88 B.Map(f);
89}
90
91//______________________________________________________________________________
92template<typename AFloat>
94 const TCpuTensor<AFloat> &A)
95{
96 auto f = [](AFloat x) {
97 AFloat sig = 1.0 / (1.0 + exp(-x));
98 return sig * (1.0 - sig);
99 };
100 B.MapFrom(f, A);
101}
102
103//______________________________________________________________________________
104template<typename AFloat>
106{
107 auto f = [](AFloat x) {return TANH_IMPL_X;};
108 B.Map(f);
109}
110
111//______________________________________________________________________________
112template<typename AFloat>
114 const TCpuTensor<AFloat> &A)
115{
116 auto f = [](AFloat x) {
117 AFloat t = TANH_IMPL_X;
118 return 1 - t * t;
119 };
120 B.MapFrom(f, A);
121}
122
123//______________________________________________________________________________
124template<typename AFloat>
126{
127 auto f = [](AFloat x) {return fabs(x);};
128 B.Map(f);
129}
130
131//______________________________________________________________________________
132template<typename AFloat>
134 const TCpuTensor<AFloat> &A)
135{
136 auto f = [](AFloat x) {
137 return (x < 0.0) ? -1.0 : 1.0;
138 };
139 B.MapFrom(f, A);
140}
141
142//______________________________________________________________________________
143template<typename AFloat>
145{
146 auto f = [](AFloat x) {return x / (1 + fabs(x));};
147 B.Map(f);
148}
149
150//______________________________________________________________________________
151template<typename AFloat>
153 const TCpuTensor<AFloat> &A)
154{
155 auto f = [](AFloat x) {
156 x = 1.0 + fabs(x);
157 x = 1.0 / (x * x);
158 return x;
159 };
160 B.MapFrom(f, A);
161}
162
163//______________________________________________________________________________
164template<typename AFloat>
166{
167 auto f = [](AFloat x) {return exp(- x * x);};
168 B.Map(f);
169}
170
171//______________________________________________________________________________
172template<typename AFloat>
174 const TCpuTensor<AFloat> &A)
175{
176 auto f = [](AFloat x) {return - 2.0 * x * exp(- x * x);};
177 B.MapFrom(f, A);
178}
179
} // namespace DNN
} // namespace TMVA
#define TANH_IMPL_X
#define f(i)
Definition: RSha256.hxx:104
double exp(double)
static void Gauss(Tensor_t &B)
static void Sigmoid(Tensor_t &B)
static void SoftSign(Tensor_t &B)
static void SymmetricReluDerivative(Tensor_t &B, const Tensor_t &A)
static void SymmetricRelu(Tensor_t &B)
static void TanhDerivative(Tensor_t &B, const Tensor_t &A)
static void Tanh(Tensor_t &B)
static void ActivationFunctionForward(Tensor_t &X, EActivationFunction activFunct, const ActivationDescriptor_t activationDescr, const double coef=0.0, const Scalar_t alpha=1, const Scalar_t beta=0)
static void SoftSignDerivative(Tensor_t &B, const Tensor_t &A)
static void IdentityDerivative(Tensor_t &B, const Tensor_t &A)
static void Relu(Tensor_t &B)
static void ActivationFunctionBackward(Tensor_t &dX, const Tensor_t &Y, const Tensor_t &dY, const Tensor_t &X, EActivationFunction activFunct, const ActivationDescriptor_t activationDescr, const Scalar_t alpha=1, const Scalar_t beta=0)
Computes the gradient of the activation function.
static void GaussDerivative(Tensor_t &B, const Tensor_t &A)
static void SigmoidDerivative(Tensor_t &B, const Tensor_t &A)
static void ReluDerivative(Tensor_t &B, const Tensor_t &A)
Double_t x[n]
Definition: legend1.C:17
static double B[]
static double A[]
VecExpr< UnaryOp< Fabs< T >, VecExpr< A, T, D >, T >, T, D > fabs(const VecExpr< A, T, D > &rhs)
EActivationFunction
Enum that represents layer activation functions.
Definition: Functions.h:32
create variable transformations