You've already forked mariadb-columnstore-engine
mirror of
https://github.com/mariadb-corporation/mariadb-columnstore-engine.git
synced 2025-08-01 06:46:55 +03:00
Apply clang-format formatting
This commit is contained in:
@ -35,108 +35,97 @@ Boost has something in interprocess::ipcdetail, but it doesn't have 64-bit API's
|
||||
|
||||
namespace atomicops
|
||||
{
|
||||
|
||||
// Atomically increments *mem and returns the resulting (incremented) value.
template <typename T>
inline T atomicInc(volatile T* mem)
{
#ifdef _MSC_VER
  switch (sizeof(T))
  {
    case 8: return InterlockedIncrement64(reinterpret_cast<volatile LONGLONG*>(mem));

    case 4:
    default: return InterlockedIncrement(reinterpret_cast<volatile LONG*>(mem));
  }
#else
  return __sync_add_and_fetch(mem, 1);
#endif
}
|
||||
|
||||
// Atomically decrements *mem, but returns the PRE-decrement value.
template <typename T>
inline T atomicDec(volatile T* mem)
{
#ifdef _MSC_VER
  switch (sizeof(T))
  {
    case 8:
      // InterlockedDecrement64 returns the post-decrement value; +1 recovers
      // the value *mem held before this operation.
      return InterlockedDecrement64(reinterpret_cast<volatile LONGLONG*>(mem)) + 1;

    case 4:
    default: return InterlockedDecrement(reinterpret_cast<volatile LONG*>(mem)) + 1;
  }
#else
  return __sync_fetch_and_add(mem, -1);
#endif
}
|
||||
|
||||
// Atomically adds val to *mem and returns the resulting value.
template <typename T>
inline T atomicAdd(volatile T* mem, T val)
{
#ifdef _MSC_VER
  switch (sizeof(T))
  {
    case 4:
    default:
      // InterlockedExchangeAdd returns the INITIAL value, so adding val gives
      // the result of this operation.  The previous code re-read *mem after
      // the add ("return *mem;"), a separate non-atomic read that could
      // observe another thread's update instead of this operation's result.
      return static_cast<T>(InterlockedExchangeAdd(reinterpret_cast<volatile LONG*>(mem), val) +
                            static_cast<LONG>(val));

    case 8:
      return static_cast<T>(InterlockedExchangeAdd64(reinterpret_cast<volatile LONGLONG*>(mem), val) +
                            static_cast<LONGLONG>(val));
  }
#else
  return __sync_add_and_fetch(mem, val);
#endif
}
|
||||
|
||||
// Atomically subtracts val from *mem and returns the resulting value.
template <typename T>
inline T atomicSub(volatile T* mem, T val)
{
#ifdef _MSC_VER
  switch (sizeof(T))
  {
    case 4:
    default:
      // InterlockedExchangeAdd returns the INITIAL value; subtracting val
      // yields the result of this operation.  The previous code re-read *mem
      // afterwards ("return *mem;"), a non-atomic read that could observe a
      // concurrent update rather than this operation's result.
      return static_cast<T>(InterlockedExchangeAdd(reinterpret_cast<volatile LONG*>(mem),
                                                   -(static_cast<LONG>(val))) -
                            static_cast<LONG>(val));

    case 8:
      return static_cast<T>(InterlockedExchangeAdd64(reinterpret_cast<volatile LONGLONG*>(mem),
                                                     -(static_cast<LONGLONG>(val))) -
                            static_cast<LONGLONG>(val));
  }
#else
  return __sync_sub_and_fetch(mem, val);
#endif
}
|
||||
|
||||
// Implements a full memory barrier: prevents the compiler (and, per the
// intrinsic's contract, the CPU) from reordering loads/stores across this call.
inline void atomicMb()
{
#ifdef _MSC_VER
  MemoryBarrier();
#else
  __sync_synchronize();
#endif
}
|
||||
|
||||
// Returns true iff the CAS took place, that is:
//   if (*mem == comp) { *mem = swap; return true; }
//   else return false;
template <typename T>
inline bool atomicCAS(volatile T* mem, T comp, T swap)
{
#ifdef _MSC_VER
  switch (sizeof(T))
  {
    case 8:
      return (InterlockedCompareExchange64(reinterpret_cast<volatile LONGLONG*>(mem), swap, comp) == comp);

    case 4:
    default:
      // The intrinsic returns the initial value of *mem; equality with comp
      // means the exchange happened.
      return (InterlockedCompareExchange(reinterpret_cast<volatile LONG*>(mem), swap, comp) == comp);
  }
#else
  // If the current value of *mem equals comp, swap is written into *mem.
  // Returns true iff the comparison succeeded and swap was written.
  return __sync_bool_compare_and_swap(mem, comp, swap);
#endif
}
|
||||
|
||||
// Zeroes *mem by atomically XOR-ing it with its own current value.
// NOTE(review): the read of *mem and the XOR are two separate steps, so a
// concurrent writer in between can leave *mem non-zero — presumably callers
// serialize stores to *mem around this; verify before relying on it under
// contention.
template <typename T>
inline void atomicZero(volatile T* mem)
{
#ifdef _MSC_VER
  switch (sizeof(T))
  {
    case 4:
    default: InterlockedXor(reinterpret_cast<volatile LONG*>(mem), (static_cast<LONG>(*mem))); break;

    case 8:
      // Bug fix: this case previously cast both the pointer and the operand to
      // 32-bit LONG, so InterlockedXor64 operated on a truncated value and the
      // high 32 bits of an 8-byte *mem were never zeroed.
      InterlockedXor64(reinterpret_cast<volatile LONGLONG*>(mem), (static_cast<LONGLONG>(*mem)));
      break;
  }
#else
  __sync_xor_and_fetch(mem, *mem);
#endif
}
|
||||
|
||||
// Implements a scheduler yield: gives up the remainder of this thread's time
// slice so other runnable threads may execute.
inline void atomicYield()
{
#ifdef _MSC_VER
  SwitchToThread();
#else
  sched_yield();
#endif
}
|
||||
|
||||
}
|
||||
} // namespace atomicops
|
||||
|
||||
#endif
|
||||
|
Reference in New Issue
Block a user