#ifndef PARTITION_ALLOC_SPINNING_MUTEX_H_
#define PARTITION_ALLOC_SPINNING_MUTEX_H_
#include <algorithm>
#include <atomic>
#include "partition_alloc/build_config.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_base/thread_annotations.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_config.h"
#include "partition_alloc/yield_processor.h"
#if PA_BUILDFLAG(IS_WIN)
#include "partition_alloc/partition_alloc_base/win/windows_types.h"
#endif
#if PA_BUILDFLAG(IS_POSIX)
#include <pthread.h>
#include <cerrno>
#endif
#if PA_BUILDFLAG(IS_APPLE)
#include <os/lock.h>
#endif
#if PA_BUILDFLAG(IS_FUCHSIA)
#include <lib/sync/mutex.h>
#endif
namespace partition_alloc::internal {
// Lock annotated for thread-safety analysis (PA_LOCKABLE). Platform dispatch
// below routes Try()/Release() to futex, SRWLOCK, os_unfair_lock, pthread, or
// Fuchsia sync_mutex backends, with a plain std::atomic spinlock fallback.
// NOTE(review): class body elided in this view — member list and Acquire()
// slow path must be confirmed at the full definition.
class PA_LOCKABLE PA_COMPONENT_EXPORT(PARTITION_ALLOC) SpinningMutex { … };
// Acquire(): body elided in this view — presumably a fast-path Try() followed
// by a spin/park slow path (yield_processor.h is included above); confirm at
// the full definition.
PA_ALWAYS_INLINE void SpinningMutex::Acquire() { … }
// constexpr default constructor: permits constant initialization of
// namespace-scope SpinningMutex instances, sidestepping static-initializer
// ordering issues.
inline constexpr SpinningMutex::SpinningMutex() = default;
#if PA_CONFIG(HAS_LINUX_KERNEL)
// Linux-kernel fast path; body elided in this view. Presumably a futex-backed
// compare-exchange — confirm at the full definition.
PA_ALWAYS_INLINE bool SpinningMutex::Try() { … }
// Linux-kernel release path; body elided in this view. Presumably stores the
// unlocked state and wakes a futex waiter if contended — confirm at the full
// definition.
PA_ALWAYS_INLINE void SpinningMutex::Release() { … }
#elif PA_BUILDFLAG(IS_WIN)
PA_ALWAYS_INLINE bool SpinningMutex::Try() {
  // Non-blocking attempt to take the SRW lock in exclusive mode.
  // TryAcquireSRWLockExclusive() returns a BOOLEAN (an integer type), so
  // compare against zero to produce a bool.
  return ::TryAcquireSRWLockExclusive(reinterpret_cast<PSRWLOCK>(&lock_)) != 0;
}
PA_ALWAYS_INLINE void SpinningMutex::Release() {
  // Release exclusive ownership of the SRW lock.
  auto* srw_lock = reinterpret_cast<PSRWLOCK>(&lock_);
  ::ReleaseSRWLockExclusive(srw_lock);
}
#elif PA_BUILDFLAG(IS_APPLE)
PA_ALWAYS_INLINE bool SpinningMutex::Try() {
  // Non-blocking attempt on the unfair lock; true iff the lock was taken.
  const bool acquired = os_unfair_lock_trylock(&unfair_lock_);
  return acquired;
}
PA_ALWAYS_INLINE void SpinningMutex::Release() {
  // Unlock the unfair lock. os_unfair_lock_unlock() returns void, so the
  // previous `return` of the call expression was unidiomatic noise; dropped
  // for consistency with the other platform Release() implementations.
  os_unfair_lock_unlock(&unfair_lock_);
}
#elif PA_BUILDFLAG(IS_POSIX)
PA_ALWAYS_INLINE bool SpinningMutex::Try() {
  // pthread_mutex_trylock() returns 0 on success and EBUSY when the mutex is
  // already held; any other value would indicate API misuse.
  const int result = pthread_mutex_trylock(&lock_);
  PA_DCHECK(result == 0 || result == EBUSY);
  return result == 0;
}
PA_ALWAYS_INLINE void SpinningMutex::Release() {
  // pthread_mutex_unlock() only fails on misuse (e.g. unlocking a mutex the
  // caller does not hold), which is a programmer error — hence the DCHECK.
  const int result = pthread_mutex_unlock(&lock_);
  PA_DCHECK(result == 0);
}
#elif PA_BUILDFLAG(IS_FUCHSIA)
PA_ALWAYS_INLINE bool SpinningMutex::Try() {
  // sync_mutex_trylock() reports ZX_OK exactly when the lock was acquired.
  const auto status = sync_mutex_trylock(&lock_);
  return status == ZX_OK;
}
PA_ALWAYS_INLINE void SpinningMutex::Release() {
  // sync_mutex_unlock() returns no status; nothing to check here.
  sync_mutex_unlock(&lock_);
}
#else
PA_ALWAYS_INLINE bool SpinningMutex::Try() {
  // Test-and-test-and-set: the relaxed load avoids dirtying the cache line
  // when the lock is already held. Only if the lock looks free do we attempt
  // the exchange, whose acquire ordering makes the critical section's reads
  // observe the previous holder's writes.
  if (lock_.load(std::memory_order_relaxed)) {
    return false;
  }
  return !lock_.exchange(true, std::memory_order_acquire);
}
PA_ALWAYS_INLINE void SpinningMutex::Release() {
  // Release ordering publishes all writes made inside the critical section to
  // the thread that next acquires the lock (whose Try() uses acquire).
  lock_.store(false, std::memory_order_release);
}
#endif
}
#endif