Logo ROOT  
Reference Guide
 
Loading...
Searching...
No Matches
RHistUtils.hxx
Go to the documentation of this file.
1/// \file
2/// \warning This file contains implementation details that will change without notice. User code should never include
3/// this header directly.
4
5#ifndef ROOT_RHistUtils
6#define ROOT_RHistUtils
7
8#include <atomic>
9#include <type_traits>
10
11#ifdef _MSC_VER
12#include <intrin.h>
13#endif
14
15namespace ROOT {
16namespace Experimental {
17namespace Internal {
18
/// Metafunction to extract the last type of a non-empty parameter pack.
/// Recurses by peeling off the first pack element until one type remains.
template <typename T, typename... Ts>
struct LastType : LastType<Ts...> {};
/// Base case: a pack of one type is its own last type.
template <typename T>
struct LastType<T> {
   using type = T;
};
25
26#ifdef _MSC_VER
27namespace MSVC {
/// Primary template for size-dispatched MSVC atomic operations.
/// Intentionally empty: only the explicit specializations for operand sizes
/// of 1, 2, 4, and 8 bytes below provide the operations.
template <std::size_t N>
struct AtomicOps {};
30
31template <>
32struct AtomicOps<1> {
33 static void Load(const void *ptr, void *ret)
34 {
35 *static_cast<char *>(ret) = __iso_volatile_load8(static_cast<const char *>(ptr));
36 }
37 static void Store(void *ptr, void *val)
38 {
39 __iso_volatile_store8(static_cast<char *>(ptr), *static_cast<char *>(val));
40 }
41 static void Add(void *ptr, const void *val)
42 {
43 _InterlockedExchangeAdd8(static_cast<char *>(ptr), *static_cast<const char *>(val));
44 }
45 static bool CompareExchange(void *ptr, void *expected, const void *desired)
46 {
47 // MSVC functions have the arguments reversed...
48 const char expectedVal = *static_cast<char *>(expected);
49 const char desiredVal = *static_cast<const char *>(desired);
50 const char previous = _InterlockedCompareExchange8(static_cast<char *>(ptr), desiredVal, expectedVal);
51 if (previous == expectedVal) {
52 return true;
53 }
54 *static_cast<char *>(expected) = previous;
55 return false;
56 }
57};
58
59template <>
60struct AtomicOps<2> {
61 static void Load(const void *ptr, void *ret)
62 {
63 *static_cast<short *>(ret) = __iso_volatile_load16(static_cast<const short *>(ptr));
64 }
65 static void Store(void *ptr, void *val)
66 {
67 __iso_volatile_store16(static_cast<short *>(ptr), *static_cast<short *>(val));
68 }
69 static void Add(void *ptr, const void *val)
70 {
71 _InterlockedExchangeAdd16(static_cast<short *>(ptr), *static_cast<const short *>(val));
72 }
73 static bool CompareExchange(void *ptr, void *expected, const void *desired)
74 {
75 // MSVC functions have the arguments reversed...
76 const short expectedVal = *static_cast<short *>(expected);
77 const short desiredVal = *static_cast<const short *>(desired);
78 const short previous = _InterlockedCompareExchange16(static_cast<short *>(ptr), desiredVal, expectedVal);
79 if (previous == expectedVal) {
80 return true;
81 }
82 *static_cast<short *>(expected) = previous;
83 return false;
84 }
85};
86
87template <>
88struct AtomicOps<4> {
89 static void Load(const void *ptr, void *ret)
90 {
91 *static_cast<int *>(ret) = __iso_volatile_load32(static_cast<const int *>(ptr));
92 }
93 static void Store(void *ptr, void *val)
94 {
95 __iso_volatile_store32(static_cast<int *>(ptr), *static_cast<int *>(val));
96 }
97 static void Add(void *ptr, const void *val)
98 {
99 _InterlockedExchangeAdd(static_cast<long *>(ptr), *static_cast<const long *>(val));
100 }
101 static bool CompareExchange(void *ptr, void *expected, const void *desired)
102 {
103 // MSVC functions have the arguments reversed...
104 const long expectedVal = *static_cast<long *>(expected);
105 const long desiredVal = *static_cast<const long *>(desired);
106 const long previous = _InterlockedCompareExchange(static_cast<long *>(ptr), desiredVal, expectedVal);
107 if (previous == expectedVal) {
108 return true;
109 }
110 *static_cast<long *>(expected) = previous;
111 return false;
112 }
113};
114
115template <>
116struct AtomicOps<8> {
117 static void Load(const void *ptr, void *ret)
118 {
119 *static_cast<__int64 *>(ret) = __iso_volatile_load64(static_cast<const __int64 *>(ptr));
120 }
121 static void Store(void *ptr, void *val)
122 {
123 __iso_volatile_store64(static_cast<__int64 *>(ptr), *static_cast<__int64 *>(val));
124 }
125 static void Add(void *ptr, const void *val);
126 static bool CompareExchange(void *ptr, void *expected, const void *desired)
127 {
128 // MSVC functions have the arguments reversed...
129 const __int64 expectedVal = *static_cast<__int64 *>(expected);
130 const __int64 desiredVal = *static_cast<const __int64 *>(desired);
132 if (previous == expectedVal) {
133 return true;
134 }
135 *static_cast<__int64 *>(expected) = previous;
136 return false;
137 }
138};
139} // namespace MSVC
140#endif
141
/// Atomically load a value with sequentially consistent ordering.
///
/// \param[in] ptr the memory location to load from
/// \param[out] ret receives the loaded value
template <typename T>
void AtomicLoad(const T *ptr, T *ret)
{
#ifndef _MSC_VER
   __atomic_load(ptr, ret, __ATOMIC_SEQ_CST);
#else
   MSVC::AtomicOps<sizeof(T)>::Load(ptr, ret);
#endif
}
151
/// Atomically store a value with release ordering.
///
/// \param[in] ptr the memory location to store to
/// \param[in] val the value to store
template <typename T>
void AtomicStoreRelease(T *ptr, T *val)
{
#ifndef _MSC_VER
   __atomic_store(ptr, val, __ATOMIC_RELEASE);
#else
   // Cannot specify the memory order directly, use a fence.
   std::atomic_thread_fence(std::memory_order_release);
   MSVC::AtomicOps<sizeof(T)>::Store(ptr, val);
#endif
}
163
/// Atomically compare-and-exchange with sequentially consistent ordering.
///
/// \param[in] ptr the memory location to operate on
/// \param[in,out] expected the value to compare against; on failure, updated
///                to the value actually observed at \p ptr
/// \param[in] desired the value to store on success
/// \return true if the exchange took place
template <typename T>
bool AtomicCompareExchange(T *ptr, T *expected, T *desired)
{
#ifndef _MSC_VER
   return __atomic_compare_exchange(ptr, expected, desired, /*weak=*/false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
#else
   return MSVC::AtomicOps<sizeof(T)>::CompareExchange(ptr, expected, desired);
#endif
}
173
/// Atomically compare-and-exchange with acquire ordering.
///
/// \param[in] ptr the memory location to operate on
/// \param[in,out] expected the value to compare against; on failure, updated
///                to the value actually observed at \p ptr
/// \param[in] desired the value to store on success
/// \return true if the exchange took place
template <typename T>
bool AtomicCompareExchangeAcquire(T *ptr, T *expected, T *desired)
{
#ifndef _MSC_VER
   return __atomic_compare_exchange(ptr, expected, desired, /*weak=*/false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
#else
   bool success = MSVC::AtomicOps<sizeof(T)>::CompareExchange(ptr, expected, desired);
   // Cannot specify the memory order directly, use an unconditional fence to avoid branching code.
   std::atomic_thread_fence(std::memory_order_acquire);
   return success;
#endif
}
186
/// Atomically add a value by repeatedly attempting a compare-exchange.
/// Used where no native atomic add exists (floating point, 64-bit on 32-bit
/// Windows).
///
/// \param[in] ptr the memory location to add to
/// \param[in] val the value to add
template <typename T>
void AtomicAddCompareExchangeLoop(T *ptr, T val)
{
   T expected;
   AtomicLoad(ptr, &expected);
   T desired = expected + val;
   while (!AtomicCompareExchange(ptr, &expected, &desired)) {
      // expected holds the new value; try again.
      desired = expected + val;
   }
}
198
199#ifdef _MSC_VER
200namespace MSVC {
201inline void AtomicOps<8>::Add(void *ptr, const void *val)
202{
203#if _WIN64
204 _InterlockedExchangeAdd64(static_cast<__int64 *>(ptr), *static_cast<const __int64 *>(val));
205#else
206 AtomicAddCompareExchangeLoop(static_cast<__int64 *>(ptr), *static_cast<const __int64 *>(val));
207#endif
208}
209} // namespace MSVC
210#endif
211
/// Atomically add a value to an integral operand with sequentially consistent
/// ordering.
///
/// \param[in] ptr the memory location to add to
/// \param[in] val the value to add
template <typename T>
std::enable_if_t<std::is_integral_v<T>> AtomicAdd(T *ptr, T val)
{
#ifndef _MSC_VER
   __atomic_add_fetch(ptr, val, __ATOMIC_SEQ_CST);
#else
   MSVC::AtomicOps<sizeof(T)>::Add(ptr, &val);
#endif
}
221
/// Atomically add a value to a floating-point operand. There is no native
/// atomic floating-point add, so fall back to a compare-exchange loop.
///
/// \param[in] ptr the memory location to add to
/// \param[in] val the value to add
template <typename T>
std::enable_if_t<std::is_floating_point_v<T>> AtomicAdd(T *ptr, T val)
{
   AtomicAddCompareExchangeLoop(ptr, val);
}
227
// For adding a double-precision weight to a single-precision bin content type, cast the argument once before the
// compare-exchange loop.
static inline void AtomicAdd(float *ptr, double val)
{
   AtomicAdd(ptr, static_cast<float>(val));
}
234
/// Atomically increment an arithmetic operand by one.
///
/// \param[in] ptr the memory location to increment
template <typename T>
std::enable_if_t<std::is_arithmetic_v<T>> AtomicInc(T *ptr)
{
   AtomicAdd(ptr, static_cast<T>(1));
}
240
/// Overload for class types that implement their own AtomicAdd member
/// function; selected via expression SFINAE on the trailing return type.
///
/// \param[in] ptr the object to add to
/// \param[in] add the value to add
/// \return whatever the member function returns
template <typename T, typename U>
auto AtomicAdd(T *ptr, const U &add) -> decltype(ptr->AtomicAdd(add))
{
   return ptr->AtomicAdd(add);
}
246
/// Overload for class types that implement their own AtomicInc member
/// function; selected via expression SFINAE on the trailing return type.
///
/// \param[in] ptr the object to increment
/// \return whatever the member function returns
template <typename T>
auto AtomicInc(T *ptr) -> decltype(ptr->AtomicInc())
{
   return ptr->AtomicInc();
}
252
253} // namespace Internal
254} // namespace Experimental
255} // namespace ROOT
256
257#endif
ROOT::Detail::TRangeCast< T, true > TRangeDynCast
TRangeDynCast is an adapter class that allows the typed iteration through a TCollection.
std::enable_if_t< std::is_arithmetic_v< T > > AtomicInc(T *ptr)
void AtomicLoad(const T *ptr, T *ret)
void AtomicStoreRelease(T *ptr, T *val)
bool AtomicCompareExchange(T *ptr, T *expected, T *desired)
bool AtomicCompareExchangeAcquire(T *ptr, T *expected, T *desired)
void AtomicAddCompareExchangeLoop(T *ptr, T val)
std::enable_if_t< std::is_integral_v< T > > AtomicAdd(T *ptr, T val)
TMatrixT< Element > & Add(TMatrixT< Element > &target, Element scalar, const TMatrixT< Element > &source)
Modify addition: target += scalar * source.