chromium/v8/test/cctest/compiler/test-atomic-load-store-codegen.cc

// Copyright 2021 the V8 project authors. All rights reserved. Use of this
// source code is governed by a BSD-style license that can be found in the
// LICENSE file.

#include "src/base/bits.h"
#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/codegen-tester.h"
#include "test/common/value-helper.h"

namespace v8 {
namespace internal {
namespace compiler {

#if V8_TARGET_LITTLE_ENDIAN
#define LSB(addr, bytes)
#elif V8_TARGET_BIG_ENDIAN
#define LSB(addr, bytes)
#else
#error "Unknown Architecture"
#endif
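// LSB(addr, bytes) is expected to yield the address of the least significant
// |bytes| of the value at |addr|, so sub-word atomic accesses touch the same
// memory regardless of the target's endianness.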

#define TEST_ATOMIC_LOAD_INTEGER(ctype, itype, mach_type, order)

TEST(AcquireLoadInteger) {}

TEST(SeqCstLoadInteger) {}
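
// A minimal sketch of the pattern the TEST_ATOMIC_LOAD_INTEGER macro drives
// (the helper name, the fixed uint32_t element type, and the exact
// m.AtomicLoad() signature are assumptions for illustration; AtomicLoad is
// assumed to mirror the AtomicLoad64 call used further down, and this helper
// is not invoked by the tests above): the generated code atomically loads
// from |buffer|, spills the result through a raw pointer, and the C++ side
// compares it for a range of inputs.
void AtomicLoadUint32Sketch(AtomicMemoryOrder order) {
  uint32_t buffer[1];
  uint32_t result;

  BufferedRawMachineAssemblerTester<int32_t> m;
  Node* base = m.PointerConstant(&buffer[0]);
  Node* index = m.Int32Constant(0);
  Node* load = m.AtomicLoad(
      AtomicLoadParameters(MachineType::Uint32(), order), base, index);
  m.StoreToPointer(&result, MachineRepresentation::kWord32, load);

  int32_t OK = 0x29000;
  m.Return(m.Int32Constant(OK));

  FOR_UINT32_INPUTS(i) {
    buffer[0] = i;
    CHECK_EQ(OK, m.Call());
    CHECK_EQ(i, result);
  }
}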

namespace {
// Mostly the same as CHECK_EQ() but customized for compressed tagged values.
template <typename CType>
void CheckEq(CType in_value, CType out_value) {}

#ifdef V8_COMPRESS_POINTERS
// Specializations for checking the result of a compressing store.
template <>
void CheckEq<Tagged<Object>>(Tagged<Object> in_value,
                             Tagged<Object> out_value) {}

template <>
void CheckEq<Tagged<HeapObject>>(Tagged<HeapObject> in_value,
                                 Tagged<HeapObject> out_value) {}

template <>
void CheckEq<Tagged<Smi>>(Tagged<Smi> in_value, Tagged<Smi> out_value) {}
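
// A hedged sketch of what such a specialization typically verifies (the
// helper below is hypothetical and is not one of the CheckEq specializations
// above): under pointer compression a tagged atomic store only writes the
// lower 32 bits, so it is the compressed (Tagged_t) halves of the two values
// that are expected to match, not the full pointers.
template <typename TaggedT>
void CheckCompressedHalvesEqSketch(TaggedT in_value, TaggedT out_value) {
  CHECK_EQ(static_cast<Tagged_t>(in_value.ptr()),
           static_cast<Tagged_t>(out_value.ptr()));
}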
#endif  // V8_COMPRESS_POINTERS

template <typename T>
void InitBuffer(Tagged<T>* buffer, size_t length, MachineType type) {}

template <typename T>
void AtomicLoadTagged(MachineType type, AtomicMemoryOrder order) {}
}  // namespace

TEST(AcquireLoadTagged) {}

TEST(SeqCstLoadTagged) {}

#define TEST_ATOMIC_STORE_INTEGER(ctype, itype, mach_type, order)

TEST(ReleaseStoreInteger) {}

TEST(SeqCstStoreInteger) {}
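
// A minimal sketch of the pattern the TEST_ATOMIC_STORE_INTEGER macro drives
// (the helper name, the fixed uint32_t element type, and the exact
// m.AtomicStore() signature are assumptions for illustration; AtomicStore is
// assumed to mirror the AtomicStore64 call used further down, and this helper
// is not invoked by the tests above): the generated code atomically stores
// its parameter into |buffer|, and the C++ side checks the memory afterwards.
void AtomicStoreUint32Sketch(AtomicMemoryOrder order) {
  uint32_t buffer[1];

  BufferedRawMachineAssemblerTester<int32_t> m(MachineType::Uint32());
  Node* base = m.PointerConstant(&buffer[0]);
  Node* index = m.Int32Constant(0);
  m.AtomicStore(AtomicStoreParameters(MachineRepresentation::kWord32,
                                      kNoWriteBarrier, order),
                base, index, m.Parameter(0));

  int32_t OK = 0x29000;
  m.Return(m.Int32Constant(OK));

  FOR_UINT32_INPUTS(i) {
    CHECK_EQ(OK, m.Call(i));
    CHECK_EQ(i, buffer[0]);
  }
}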

namespace {
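// Shared driver for the tagged atomic-store tests below; expected to mirror
// AtomicLoadTagged() but store InitBuffer() values through the generated code
// and compare the written memory using CheckEq().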
template <typename T>
void AtomicStoreTagged(MachineType type, AtomicMemoryOrder order) {}
}  // namespace

TEST(ReleaseStoreTagged) {}

TEST(SeqCstStoreTagged) {}

#if V8_TARGET_ARCH_32_BIT

namespace {
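// On 32-bit targets a 64-bit atomic load is lowered to a pair load: the
// generated code receives the low and high words as Projection(0) and
// Projection(1), spills them to |low| and |high|, and the C++ side
// reassembles and checks the 64-bit value.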
void TestAtomicPairLoadInteger(AtomicMemoryOrder order) {
  uint64_t buffer[1];
  uint32_t high;
  uint32_t low;

  BufferedRawMachineAssemblerTester<int32_t> m;
  Node* base = m.PointerConstant(&buffer[0]);
  Node* index = m.Int32Constant(0);

  Node* pair_load = m.AtomicLoad64(
      AtomicLoadParameters(MachineType::Uint64(), order), base, index);
  m.StoreToPointer(&low, MachineRepresentation::kWord32,
                   m.Projection(0, pair_load));
  m.StoreToPointer(&high, MachineRepresentation::kWord32,
                   m.Projection(1, pair_load));

  int32_t OK = 0x29000;
  m.Return(m.Int32Constant(OK));

  FOR_UINT64_INPUTS(i) {
    buffer[0] = i;
    CHECK_EQ(OK, m.Call());
    CHECK_EQ(i, make_uint64(high, low));
  }
}
}  // namespace

TEST(AcquirePairLoadInteger) {
  TestAtomicPairLoadInteger(AtomicMemoryOrder::kAcqRel);
}

TEST(SeqCstPairLoadInteger) {
  TestAtomicPairLoadInteger(AtomicMemoryOrder::kSeqCst);
}

namespace {
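// The 64-bit atomic store counterpart: the value to store is passed to the
// generated code as two 32-bit halves (low word first), which AtomicStore64
// writes to |buffer| as a single atomic pair store.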
void TestAtomicPairStoreInteger(AtomicMemoryOrder order) {
  uint64_t buffer[1];

  BufferedRawMachineAssemblerTester<int32_t> m(MachineType::Uint32(),
                                               MachineType::Uint32());
  Node* base = m.PointerConstant(&buffer[0]);
  Node* index = m.Int32Constant(0);

  m.AtomicStore64(AtomicStoreParameters(MachineRepresentation::kWord64,
                                        kNoWriteBarrier, order),
                  base, index, m.Parameter(0), m.Parameter(1));

  int32_t OK = 0x29000;
  m.Return(m.Int32Constant(OK));

  FOR_UINT64_INPUTS(i) {
    CHECK_EQ(OK, m.Call(static_cast<uint32_t>(i & 0xFFFFFFFF),
                        static_cast<uint32_t>(i >> 32)));
    CHECK_EQ(i, buffer[0]);
  }
}
}  // namespace

TEST(ReleasePairStoreInteger) {
  TestAtomicPairStoreInteger(AtomicMemoryOrder::kAcqRel);
}

TEST(SeqCstPairStoreInteger) {
  TestAtomicPairStoreInteger(AtomicMemoryOrder::kSeqCst);
}

#endif  // V8_TARGET_ARCH_32_BIT

}  // namespace compiler
}  // namespace internal
}  // namespace v8