#include "src/base/SkSharedMutex.h"
#include "include/private/base/SkAssert.h"
#include "include/private/base/SkSemaphore.h"
#include <cinttypes>
#include <cstdint>
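// When built under ThreadSanitizer, describe this custom reader-writer lock to
// the tool through the dynamic-annotations interface declared below; in other
// builds the ANNOTATE_RWLOCK_* macros are stubbed out.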
#if !defined(__has_feature)
#define __has_feature …
#endif
#if __has_feature(thread_sanitizer)
#define ANNOTATE_RWLOCK_CREATE …
#define ANNOTATE_RWLOCK_DESTROY …
#define ANNOTATE_RWLOCK_ACQUIRED …
#define ANNOTATE_RWLOCK_RELEASED …
#if defined(DYNAMIC_ANNOTATIONS_WANT_ATTRIBUTE_WEAK)
#if defined(__GNUC__)
#define DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK …
#else
#error weak annotations are not supported for your compiler
#endif
#else
#define DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK
#endif
extern "C" {
void AnnotateRWLockCreate(
const char *file, int line,
const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
void AnnotateRWLockDestroy(
const char *file, int line,
const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
void AnnotateRWLockAcquired(
const char *file, int line,
const volatile void *lock, long is_w) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
void AnnotateRWLockReleased(
const char *file, int line,
const volatile void *lock, long is_w) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK;
}
#else
#define ANNOTATE_RWLOCK_CREATE(lock) …
#define ANNOTATE_RWLOCK_DESTROY(lock) …
#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) …
#define ANNOTATE_RWLOCK_RELEASED(lock, is_w) …
#endif
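// SK_DEBUG builds use a heavier implementation built around ThreadIDSet so that
// assertHeld()/assertHeldShared() can check the calling thread; release builds
// use the semaphore-based counting implementation further below.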
#ifdef SK_DEBUG
#include "include/private/base/SkTDArray.h"
#include "include/private/base/SkThreadID.h"
class SkSharedMutex::ThreadIDSet { … };
SkSharedMutex::SkSharedMutex()
: … { … }
SkSharedMutex::~SkSharedMutex() { … }
void SkSharedMutex::acquire() { … }
void SkSharedMutex::release() { … }
void SkSharedMutex::assertHeld() const { … }
void SkSharedMutex::acquireShared() { … }
void SkSharedMutex::releaseShared() { … }
void SkSharedMutex::assertHeldShared() const { … }
#else
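// The non-debug implementation packs three counters into the single atomic
// fQueueCounts, each field kLogThreadCount bits wide:
//   bits  0-9   threads currently holding the lock in shared mode
//   bits 10-19  threads holding or waiting for the exclusive lock
//   bits 20-29  threads waiting for a shared lock behind an exclusive waiter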
static const int kLogThreadCount = 10;
enum {
kSharedOffset = (0 * kLogThreadCount),
    kWaitingExclusiveOffset = (1 * kLogThreadCount),
kWaitingSharedOffset = (2 * kLogThreadCount),
kSharedMask = ((1 << kLogThreadCount) - 1) << kSharedOffset,
    kWaitingExclusiveMask = ((1 << kLogThreadCount) - 1) << kWaitingExclusiveOffset,
kWaitingSharedMask = ((1 << kLogThreadCount) - 1) << kWaitingSharedOffset,
};
SkSharedMutex::SkSharedMutex() : fQueueCounts(0) { ANNOTATE_RWLOCK_CREATE(this); }
SkSharedMutex::~SkSharedMutex() { ANNOTATE_RWLOCK_DESTROY(this); }
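// Acquire the lock in exclusive (writer) mode.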
void SkSharedMutex::acquire() {
    int32_t oldQueueCounts = fQueueCounts.fetch_add(1 << kWaitingExclusiveOffset,
std::memory_order_acquire);
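    // If another writer already holds or is queued for the lock, or readers are
    // active, wait: the last holder to release will signal fExclusiveQueue.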
if ((oldQueueCounts & kWaitingExclusiveMask) > 0 || (oldQueueCounts & kSharedMask) > 0) {
fExclusiveQueue.wait();
}
ANNOTATE_RWLOCK_ACQUIRED(this, 1);
}
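// Release the exclusive (writer) lock. Readers that queued up while we held the
// lock are promoted all at once; otherwise the next waiting writer, if any, is woken.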
void SkSharedMutex::release() {
ANNOTATE_RWLOCK_RELEASED(this, 1);
int32_t oldQueueCounts = fQueueCounts.load(std::memory_order_relaxed);
int32_t waitingShared;
int32_t newQueueCounts;
do {
newQueueCounts = oldQueueCounts;
        newQueueCounts -= 1 << kWaitingExclusiveOffset;
waitingShared = (oldQueueCounts & kWaitingSharedMask) >> kWaitingSharedOffset;
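        // Promote every reader that queued behind us: move the waiting-shared
        // count into the shared (active) count in the same atomic update.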
if (waitingShared > 0) {
newQueueCounts &= ~kWaitingSharedMask;
newQueueCounts |= waitingShared << kSharedOffset;
}
} while (!fQueueCounts.compare_exchange_strong(oldQueueCounts, newQueueCounts,
std::memory_order_release,
std::memory_order_relaxed));
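    // Wake whichever side the update above chose: all newly promoted readers, or
    // a single writer if writers remain queued and no readers were waiting.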
if (waitingShared > 0) {
fSharedQueue.signal(waitingShared);
} else if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
fExclusiveQueue.signal();
}
}
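// Acquire the lock in shared (reader) mode. Readers enter immediately unless a
// writer holds or is waiting for the lock, in which case they queue behind it.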
void SkSharedMutex::acquireShared() {
int32_t oldQueueCounts = fQueueCounts.load(std::memory_order_relaxed);
int32_t newQueueCounts;
do {
newQueueCounts = oldQueueCounts;
if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
newQueueCounts += 1 << kWaitingSharedOffset;
} else {
newQueueCounts += 1 << kSharedOffset;
}
} while (!fQueueCounts.compare_exchange_strong(oldQueueCounts, newQueueCounts,
std::memory_order_acquire,
std::memory_order_relaxed));
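    // If a writer holds or is waiting for the lock, we registered as a waiting
    // reader above; block until release() promotes us and signals fSharedQueue.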
if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
fSharedQueue.wait();
}
ANNOTATE_RWLOCK_ACQUIRED(this, 0);
}
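// Release a shared (reader) lock. The last reader out hands the lock to the next
// waiting writer, if there is one.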
void SkSharedMutex::releaseShared() {
ANNOTATE_RWLOCK_RELEASED(this, 0);
int32_t oldQueueCounts = fQueueCounts.fetch_sub(1 << kSharedOffset,
std::memory_order_release);
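    // If we were the last active reader and a writer is queued, wake it.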
if (((oldQueueCounts & kSharedMask) >> kSharedOffset) == 1
&& (oldQueueCounts & kWaitingExclusiveMask) > 0) {
fExclusiveQueue.signal();
}
}
#endif