#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/execution/arguments-inl.h"
#include "src/heap/factory.h"
#include "src/logging/counters.h"
#include "src/numbers/conversions-inl.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-shared-array-inl.h"
#include "src/objects/js-struct-inl.h"
#include "src/runtime/runtime-utils.h"
namespace v8 {
namespace internal {
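// Runtime fallbacks implementing the Atomics operations on ArrayBuffers and
// SharedArrayBuffers (https://tc39.es/ecma262/#sec-atomics). They are only
// compiled for architectures that do not handle these operations inline in
// the generated builtins.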
#if V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_S390 || \
V8_TARGET_ARCH_S390X || V8_TARGET_ARCH_LOONG64
namespace {
#if defined(V8_OS_STARBOARD)
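// Starboard: no atomics implementation is provided here; reaching any of
// these helpers aborts via UNIMPLEMENTED().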
template <typename T>
inline T ExchangeSeqCst(T* p, T value) {
UNIMPLEMENTED();
}
template <typename T>
inline T CompareExchangeSeqCst(T* p, T oldval, T newval) {
UNIMPLEMENTED();
}
template <typename T>
inline T AddSeqCst(T* p, T value) {
UNIMPLEMENTED();
}
template <typename T>
inline T SubSeqCst(T* p, T value) {
UNIMPLEMENTED();
}
template <typename T>
inline T AndSeqCst(T* p, T value) {
UNIMPLEMENTED();
}
template <typename T>
inline T OrSeqCst(T* p, T value) {
UNIMPLEMENTED();
}
template <typename T>
inline T XorSeqCst(T* p, T value) {
UNIMPLEMENTED();
}
#elif V8_CC_GNU
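// GCC/Clang: map the helpers onto the __atomic builtins.
// On 32-bit targets, 64-bit atomic operations may lower to library calls;
// Clang warns about this via -Watomic-alignment, and -Wpragmas is also
// ignored so that GCC (which does not know -Watomic-alignment) stays quiet
// about the unrecognized warning flag.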
#ifdef V8_TARGET_ARCH_32_BIT
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpragmas"
#pragma GCC diagnostic ignored "-Watomic-alignment"
#endif
template <typename T>
inline T LoadSeqCst(T* p) {
return __atomic_load_n(p, __ATOMIC_SEQ_CST);
}
template <typename T>
inline void StoreSeqCst(T* p, T value) {
__atomic_store_n(p, value, __ATOMIC_SEQ_CST);
}
template <typename T>
inline T ExchangeSeqCst(T* p, T value) {
return __atomic_exchange_n(p, value, __ATOMIC_SEQ_CST);
}
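// Returns the value observed in *p before the operation: on failure,
// __atomic_compare_exchange_n writes the current value back into oldval;
// on success, oldval already equals it.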
template <typename T>
inline T CompareExchangeSeqCst(T* p, T oldval, T newval) {
(void)__atomic_compare_exchange_n(p, &oldval, newval, 0, __ATOMIC_SEQ_CST,
__ATOMIC_SEQ_CST);
return oldval;
}
template <typename T>
inline T AddSeqCst(T* p, T value) {
return __atomic_fetch_add(p, value, __ATOMIC_SEQ_CST);
}
template <typename T>
inline T SubSeqCst(T* p, T value) {
return __atomic_fetch_sub(p, value, __ATOMIC_SEQ_CST);
}
template <typename T>
inline T AndSeqCst(T* p, T value) {
return __atomic_fetch_and(p, value, __ATOMIC_SEQ_CST);
}
template <typename T>
inline T OrSeqCst(T* p, T value) {
return __atomic_fetch_or(p, value, __ATOMIC_SEQ_CST);
}
template <typename T>
inline T XorSeqCst(T* p, T value) {
return __atomic_fetch_xor(p, value, __ATOMIC_SEQ_CST);
}
#ifdef V8_TARGET_ARCH_32_BIT
#pragma GCC diagnostic pop
#endif
#elif V8_CC_MSVC
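// MSVC does not provide the __atomic builtins; express the helpers in terms
// of the Interlocked* intrinsics instead.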
#define InterlockedExchange32 …
#define InterlockedCompareExchange32 …
#define InterlockedCompareExchange8 …
#define InterlockedExchangeAdd32 …
#define InterlockedExchangeAdd16 …
#define InterlockedExchangeAdd8 …
#define InterlockedAnd32 …
#define InterlockedOr64 …
#define InterlockedOr32 …
#define InterlockedXor32 …
#if defined(V8_HOST_ARCH_ARM64)
#define InterlockedExchange8 …
#endif
#define ATOMIC_OPS …
ATOMIC_OPS(int8_t, 8, char)
ATOMIC_OPS(uint8_t, 8, char)
ATOMIC_OPS(int16_t, 16, short)
ATOMIC_OPS(uint16_t, 16, short)
ATOMIC_OPS(int32_t, 32, long)
ATOMIC_OPS(uint32_t, 32, long)
ATOMIC_OPS(int64_t, 64, __int64)
ATOMIC_OPS(uint64_t, 64, __int64)
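// Plain seq-cst load/store helpers are not implemented for the MSVC branch;
// they are not expected to be reached in this configuration.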
template <typename T>
inline T LoadSeqCst(T* p) {
UNREACHABLE();
}
template <typename T>
inline void StoreSeqCst(T* p, T value) {
UNREACHABLE();
}
#undef ATOMIC_OPS
#undef InterlockedExchange32
#undef InterlockedCompareExchange32
#undef InterlockedCompareExchange8
#undef InterlockedExchangeAdd32
#undef InterlockedExchangeAdd16
#undef InterlockedExchangeAdd8
#undef InterlockedAnd32
#undef InterlockedOr64
#undef InterlockedOr32
#undef InterlockedXor32
#if defined(V8_HOST_ARCH_ARM64)
#undef InterlockedExchange8
#endif
#else
#error Unsupported platform!
#endif
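// FromObject converts an already-converted JS number (or BigInt, for the
// 64-bit specializations) into the raw integer type used by the helpers
// above.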
template <typename T>
T FromObject(Handle<Object> number);
template <>
inline uint8_t FromObject<uint8_t>(Handle<Object> number) {
return NumberToUint32(*number);
}
template <>
inline int8_t FromObject<int8_t>(Handle<Object> number) {
return NumberToInt32(*number);
}
template <>
inline uint16_t FromObject<uint16_t>(Handle<Object> number) {
return NumberToUint32(*number);
}
template <>
inline int16_t FromObject<int16_t>(Handle<Object> number) {
return NumberToInt32(*number);
}
template <>
inline uint32_t FromObject<uint32_t>(Handle<Object> number) {
return NumberToUint32(*number);
}
template <>
inline int32_t FromObject<int32_t>(Handle<Object> number) {
return NumberToInt32(*number);
}
template <>
inline uint64_t FromObject<uint64_t>(Handle<Object> bigint) {
return Cast<BigInt>(bigint)->AsUint64();
}
template <>
inline int64_t FromObject<int64_t>(Handle<Object> bigint) {
return Cast<BigInt>(bigint)->AsInt64();
}
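// ToObject boxes a raw result back into a JS value: a Smi for 8/16-bit
// values, a heap number for 32-bit values (which may not fit in a Smi), and
// a BigInt for 64-bit values.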
inline Tagged<Object> ToObject(Isolate* isolate, int8_t t) {
return Smi::FromInt(t);
}
inline Tagged<Object> ToObject(Isolate* isolate, uint8_t t) {
return Smi::FromInt(t);
}
inline Tagged<Object> ToObject(Isolate* isolate, int16_t t) {
return Smi::FromInt(t);
}
inline Tagged<Object> ToObject(Isolate* isolate, uint16_t t) {
return Smi::FromInt(t);
}
inline Tagged<Object> ToObject(Isolate* isolate, int32_t t) {
return *isolate->factory()->NewNumber(t);
}
inline Tagged<Object> ToObject(Isolate* isolate, uint32_t t) {
return *isolate->factory()->NewNumber(t);
}
inline Tagged<Object> ToObject(Isolate* isolate, int64_t t) {
return *BigInt::FromInt64(isolate, t);
}
inline Tagged<Object> ToObject(Isolate* isolate, uint64_t t) {
return *BigInt::FromUint64(isolate, t);
}
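// Each Op struct below wraps one seq-cst helper behind a common
// Do(isolate, buffer, index[, value]) interface so that
// GetModifySetValueInBuffer can be parameterized over the operation.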
template <typename T>
struct Load {
static inline Tagged<Object> Do(Isolate* isolate, void* buffer,
size_t index) {
T result = LoadSeqCst(static_cast<T*>(buffer) + index);
return ToObject(isolate, result);
}
};
template <typename T>
struct Store {
static inline void Do(Isolate* isolate, void* buffer, size_t index,
Handle<Object> obj) {
T value = FromObject<T>(obj);
StoreSeqCst(static_cast<T*>(buffer) + index, value);
}
};
template <typename T>
struct Exchange {
static inline Tagged<Object> Do(Isolate* isolate, void* buffer, size_t index,
Handle<Object> obj) {
T value = FromObject<T>(obj);
T result = ExchangeSeqCst(static_cast<T*>(buffer) + index, value);
return ToObject(isolate, result);
}
};
template <typename T>
inline Tagged<Object> DoCompareExchange(Isolate* isolate, void* buffer,
size_t index, Handle<Object> oldobj,
Handle<Object> newobj) {
T oldval = FromObject<T>(oldobj);
T newval = FromObject<T>(newobj);
T result =
CompareExchangeSeqCst(static_cast<T*>(buffer) + index, oldval, newval);
return ToObject(isolate, result);
}
template <typename T>
struct Add {
static inline Tagged<Object> Do(Isolate* isolate, void* buffer, size_t index,
Handle<Object> obj) {
T value = FromObject<T>(obj);
T result = AddSeqCst(static_cast<T*>(buffer) + index, value);
return ToObject(isolate, result);
}
};
template <typename T>
struct Sub {
static inline Tagged<Object> Do(Isolate* isolate, void* buffer, size_t index,
Handle<Object> obj) {
T value = FromObject<T>(obj);
T result = SubSeqCst(static_cast<T*>(buffer) + index, value);
return ToObject(isolate, result);
}
};
template <typename T>
struct And {
static inline Tagged<Object> Do(Isolate* isolate, void* buffer, size_t index,
Handle<Object> obj) {
T value = FromObject<T>(obj);
T result = AndSeqCst(static_cast<T*>(buffer) + index, value);
return ToObject(isolate, result);
}
};
template <typename T>
struct Or {
static inline Tagged<Object> Do(Isolate* isolate, void* buffer, size_t index,
Handle<Object> obj) {
T value = FromObject<T>(obj);
T result = OrSeqCst(static_cast<T*>(buffer) + index, value);
return ToObject(isolate, result);
}
};
template <typename T>
struct Xor {
static inline Tagged<Object> Do(Isolate* isolate, void* buffer, size_t index,
Handle<Object> obj) {
T value = FromObject<T>(obj);
T result = XorSeqCst(static_cast<T*>(buffer) + index, value);
return ToObject(isolate, result);
}
};
}  // namespace
#define INTEGER_TYPED_ARRAYS …
#define THROW_ERROR_RETURN_FAILURE_ON_DETACHED_OR_OUT_OF_BOUNDS …
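// Common driver for the read-modify-write Atomics operations. The value
// operand is converted first (Object::ToInteger, or BigInt::FromObject for
// 64-bit arrays); since that conversion can run arbitrary JS, the detached /
// out-of-bounds check is repeated afterwards, before the buffer is touched.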
template <template <typename> class Op>
Tagged<Object> GetModifySetValueInBuffer(RuntimeArguments args,
Isolate* isolate,
const char* method_name) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
Handle<JSTypedArray> sta = args.at<JSTypedArray>(0);
size_t index = NumberToSize(args[1]);
Handle<Object> value_obj = args.at(2);
uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
sta->byte_offset();
if (sta->type() >= kExternalBigInt64Array) {
Handle<BigInt> bigint;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, bigint,
BigInt::FromObject(isolate, value_obj));
THROW_ERROR_RETURN_FAILURE_ON_DETACHED_OR_OUT_OF_BOUNDS(isolate, sta, index,
method_name);
CHECK_LT(index, sta->GetLength());
if (sta->type() == kExternalBigInt64Array) {
return Op<int64_t>::Do(isolate, source, index, bigint);
}
DCHECK(sta->type() == kExternalBigUint64Array);
return Op<uint64_t>::Do(isolate, source, index, bigint);
}
Handle<Object> value;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
Object::ToInteger(isolate, value_obj));
THROW_ERROR_RETURN_FAILURE_ON_DETACHED_OR_OUT_OF_BOUNDS(isolate, sta, index,
method_name);
CHECK_LT(index, sta->GetLength());
switch (sta->type()) {
#define TYPED_ARRAY_CASE …
INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
default:
break;
}
UNREACHABLE();
}
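// Atomics.load for BigInt64Array / BigUint64Array elements; the array is
// expected to have been validated by the caller (DCHECKs only).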
RUNTIME_FUNCTION(Runtime_AtomicsLoad64) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
Handle<JSTypedArray> sta = args.at<JSTypedArray>(0);
size_t index = NumberToSize(args[1]);
uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
sta->byte_offset();
DCHECK(sta->type() == kExternalBigInt64Array ||
sta->type() == kExternalBigUint64Array);
DCHECK(!sta->IsDetachedOrOutOfBounds());
CHECK_LT(index, sta->GetLength());
if (sta->type() == kExternalBigInt64Array) {
return Load<int64_t>::Do(isolate, source, index);
}
DCHECK(sta->type() == kExternalBigUint64Array);
return Load<uint64_t>::Do(isolate, source, index);
}
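// Atomics.store for BigInt64Array / BigUint64Array elements. The BigInt
// conversion can run arbitrary JS, so detachment / bounds are re-checked
// afterwards; the stored BigInt is also the return value.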
RUNTIME_FUNCTION(Runtime_AtomicsStore64) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
Handle<JSTypedArray> sta = args.at<JSTypedArray>(0);
size_t index = NumberToSize(args[1]);
Handle<Object> value_obj = args.at(2);
uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
sta->byte_offset();
Handle<BigInt> bigint;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, bigint,
BigInt::FromObject(isolate, value_obj));
THROW_ERROR_RETURN_FAILURE_ON_DETACHED_OR_OUT_OF_BOUNDS(isolate, sta, index,
"Atomics.store");
DCHECK(sta->type() == kExternalBigInt64Array ||
sta->type() == kExternalBigUint64Array);
CHECK_LT(index, sta->GetLength());
if (sta->type() == kExternalBigInt64Array) {
Store<int64_t>::Do(isolate, source, index, bigint);
return *bigint;
}
DCHECK(sta->type() == kExternalBigUint64Array);
Store<uint64_t>::Do(isolate, source, index, bigint);
return *bigint;
}
RUNTIME_FUNCTION(Runtime_AtomicsExchange) {
return GetModifySetValueInBuffer<Exchange>(args, isolate, "Atomics.exchange");
}
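// Atomics.compareExchange: like GetModifySetValueInBuffer, but with two value
// operands (expected and replacement), so it is written out explicitly.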
RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
Handle<JSTypedArray> sta = args.at<JSTypedArray>(0);
size_t index = NumberToSize(args[1]);
Handle<Object> old_value_obj = args.at(2);
Handle<Object> new_value_obj = args.at(3);
uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
sta->byte_offset();
if (sta->type() >= kExternalBigInt64Array) {
Handle<BigInt> old_bigint;
Handle<BigInt> new_bigint;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, old_bigint, BigInt::FromObject(isolate, old_value_obj));
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, new_bigint, BigInt::FromObject(isolate, new_value_obj));
THROW_ERROR_RETURN_FAILURE_ON_DETACHED_OR_OUT_OF_BOUNDS(
isolate, sta, index, "Atomics.compareExchange");
CHECK_LT(index, sta->GetLength());
if (sta->type() == kExternalBigInt64Array) {
return DoCompareExchange<int64_t>(isolate, source, index, old_bigint,
new_bigint);
}
DCHECK(sta->type() == kExternalBigUint64Array);
return DoCompareExchange<uint64_t>(isolate, source, index, old_bigint,
new_bigint);
}
Handle<Object> old_value;
Handle<Object> new_value;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, old_value,
Object::ToInteger(isolate, old_value_obj));
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, new_value,
Object::ToInteger(isolate, new_value_obj));
THROW_ERROR_RETURN_FAILURE_ON_DETACHED_OR_OUT_OF_BOUNDS(
isolate, sta, index, "Atomics.compareExchange");
switch (sta->type()) {
#define TYPED_ARRAY_CASE …
INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
default:
break;
}
UNREACHABLE();
}
RUNTIME_FUNCTION(Runtime_AtomicsAdd) {
return GetModifySetValueInBuffer<Add>(args, isolate, "Atomics.add");
}
RUNTIME_FUNCTION(Runtime_AtomicsSub) {
return GetModifySetValueInBuffer<Sub>(args, isolate, "Atomics.sub");
}
RUNTIME_FUNCTION(Runtime_AtomicsAnd) {
return GetModifySetValueInBuffer<And>(args, isolate, "Atomics.and");
}
RUNTIME_FUNCTION(Runtime_AtomicsOr) {
return GetModifySetValueInBuffer<Or>(args, isolate, "Atomics.or");
}
RUNTIME_FUNCTION(Runtime_AtomicsXor) {
return GetModifySetValueInBuffer<Xor>(args, isolate, "Atomics.xor");
}
#undef INTEGER_TYPED_ARRAYS
#else
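// On architectures with inline atomics support in the builtins, these runtime
// entries are not expected to be reached.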
RUNTIME_FUNCTION(…) { … }
RUNTIME_FUNCTION(…) { … }
RUNTIME_FUNCTION(…) { … }
RUNTIME_FUNCTION(…) { … }
RUNTIME_FUNCTION(…) { … }
RUNTIME_FUNCTION(…) { … }
RUNTIME_FUNCTION(…) { … }
RUNTIME_FUNCTION(…) { … }
RUNTIME_FUNCTION(…) { … }
#endif  // V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_S390 ||
        // V8_TARGET_ARCH_S390X || V8_TARGET_ARCH_LOONG64
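// Runtime support for atomic field accesses on JS shared structs and shared
// arrays (Atomics.load / store / exchange / compareExchange on shared
// objects).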
RUNTIME_FUNCTION(Runtime_AtomicsLoadSharedStructOrArray) { … }
namespace {
template <typename WriteOperation>
Tagged<Object> AtomicFieldWrite(Isolate* isolate, Handle<JSObject> object,
Handle<Name> field_name,
DirectHandle<Object> value,
WriteOperation write_operation) { … }
}  // namespace
RUNTIME_FUNCTION(Runtime_AtomicsStoreSharedStructOrArray) { … }
RUNTIME_FUNCTION(Runtime_AtomicsExchangeSharedStructOrArray) { … }
RUNTIME_FUNCTION(Runtime_AtomicsCompareExchangeSharedStructOrArray) { … }
}  // namespace internal
}  // namespace v8