llvm/lib/CodeGen/AtomicExpandPass.cpp

//===- AtomicExpandPass.cpp - Expand atomic instructions ------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass (at IR level) to replace atomic instructions with
// __atomic_* library calls, or target-specific instructions which implement
// the same semantics in a way that better fits the target backend.  This can
// include the use of (intrinsic-based) load-linked/store-conditional loops,
// AtomicCmpXchg, or type coercions.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/InstSimplifyFolder.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/CodeGen/AtomicExpand.h"
#include "llvm/CodeGen/AtomicExpandUtils.h"
#include "llvm/CodeGen/RuntimeLibcallUtil.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/MemoryModelRelaxationAnnotations.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/LowerAtomic.h"
#include <cassert>
#include <cstdint>
#include <iterator>

using namespace llvm;

#define DEBUG_TYPE "atomic-expand"

namespace {

class AtomicExpandImpl {};

class AtomicExpandLegacy : public FunctionPass {};

// IRBuilder to be used for replacement atomic instructions.
struct ReplacementIRBuilder
    : IRBuilder<InstSimplifyFolder, IRBuilderCallbackInserter> {};

} // end anonymous namespace

char AtomicExpandLegacy::ID = 0;

char &llvm::AtomicExpandID = AtomicExpandLegacy::ID;

INITIALIZE_PASS_BEGIN(AtomicExpandLegacy, DEBUG_TYPE,
                      "Expand Atomic instructions", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(AtomicExpandLegacy, DEBUG_TYPE,
                    "Expand Atomic instructions", false, false)

// Helper functions to retrieve the size of atomic instructions.
static unsigned getAtomicOpSize(LoadInst *LI) {}

static unsigned getAtomicOpSize(StoreInst *SI) {}

static unsigned getAtomicOpSize(AtomicRMWInst *RMWI) {}

static unsigned getAtomicOpSize(AtomicCmpXchgInst *CASI) {}
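
// A minimal sketch of how such a size helper can be written, assuming the
// DataLayout is reached through the parent module; the name below is
// illustrative, not the upstream implementation.
static unsigned sketchAtomicLoadSize(LoadInst *LI) {
  // Number of bytes the loaded type occupies in memory.
  const DataLayout &DL = LI->getModule()->getDataLayout();
  return DL.getTypeStoreSize(LI->getType());
}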

// Determine whether a particular atomic operation has a supported size
// and sufficient alignment to be passed through to target lowering
// (versus being turned into an __atomic_* libcall).
template <typename Inst>
static bool atomicSizeSupported(const TargetLowering *TLI, Inst *I) {}
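
// A hedged sketch of the size/alignment gate described above. It assumes the
// getAtomicOpSize() helpers in this file and the
// TargetLoweringBase::getMaxAtomicSizeInBitsSupported() hook; the exact
// upstream predicate may differ.
template <typename Inst>
static bool sketchAtomicSizeSupported(const TargetLowering *TLI, Inst *I) {
  unsigned Size = getAtomicOpSize(I);
  Align Alignment = I->getAlign();
  // The access must be naturally aligned and no wider than what the target
  // can lower natively.
  return Alignment >= Size &&
         Size <= TLI->getMaxAtomicSizeInBitsSupported() / 8;
}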

bool AtomicExpandImpl::processAtomicInstr(Instruction *I) {}

bool AtomicExpandImpl::run(Function &F, const TargetMachine *TM) {}

bool AtomicExpandLegacy::runOnFunction(Function &F) {}

FunctionPass *llvm::createAtomicExpandLegacyPass() {}

PreservedAnalyses AtomicExpandPass::run(Function &F,
                                        FunctionAnalysisManager &AM) {}

bool AtomicExpandImpl::bracketInstWithFences(Instruction *I,
                                             AtomicOrdering Order) {}
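
// A minimal sketch of the fence bracketing above, using the
// TargetLoweringBase::emitLeadingFence()/emitTrailingFence() hooks; the
// upstream routine also reports whether any fence was actually emitted.
static void sketchBracketWithFences(const TargetLowering *TLI, Instruction *I,
                                    AtomicOrdering Order) {
  IRBuilder<> Builder(I);
  TLI->emitLeadingFence(Builder, I, Order);  // fence before the access
  Builder.SetInsertPoint(I->getNextNode());  // I is never a terminator here
  TLI->emitTrailingFence(Builder, I, Order); // fence after the access
}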

/// Get the iX type with the same bitwidth as T.
IntegerType *
AtomicExpandImpl::getCorrespondingIntegerType(Type *T, const DataLayout &DL) {}
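
// A minimal sketch of the bitwidth mapping described above, assuming T has a
// fixed size known to the DataLayout; the function name is illustrative.
static IntegerType *sketchCorrespondingIntegerType(Type *T,
                                                   const DataLayout &DL) {
  unsigned BitWidth = DL.getTypeSizeInBits(T);
  return IntegerType::get(T->getContext(), BitWidth);
}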

/// Convert an atomic load of a non-integral type to an integer load of the
/// equivalent bitwidth.  See the function comment on
/// convertAtomicStoreToIntegerType for background.
LoadInst *AtomicExpandImpl::convertAtomicLoadToIntegerType(LoadInst *LI) {}
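
// A hedged sketch of the load-side conversion: reload the location as iN and
// bitcast the result back to the original type. Metadata propagation and the
// AtomicExpandImpl plumbing are omitted; names are illustrative.
static LoadInst *sketchConvertAtomicLoadToInt(LoadInst *LI, IntegerType *NewTy) {
  IRBuilder<> Builder(LI);
  LoadInst *NewLI = Builder.CreateAlignedLoad(NewTy, LI->getPointerOperand(),
                                              LI->getAlign(), "loaded");
  NewLI->setAtomic(LI->getOrdering(), LI->getSyncScopeID());
  NewLI->setVolatile(LI->isVolatile());
  // Users still expect the original type, so cast the integer back.
  Value *NewVal = Builder.CreateBitCast(NewLI, LI->getType());
  LI->replaceAllUsesWith(NewVal);
  LI->eraseFromParent();
  return NewLI;
}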

AtomicRMWInst *
AtomicExpandImpl::convertAtomicXchgToIntegerType(AtomicRMWInst *RMWI) {}

bool AtomicExpandImpl::tryExpandAtomicLoad(LoadInst *LI) {}

bool AtomicExpandImpl::tryExpandAtomicStore(StoreInst *SI) {}

bool AtomicExpandImpl::expandAtomicLoadToLL(LoadInst *LI) {}

bool AtomicExpandImpl::expandAtomicLoadToCmpXchg(LoadInst *LI) {}

/// Convert an atomic store of a non-integral type to an integer store of the
/// equivalent bitwidth.  We used to not support floating point or vector
/// atomics in the IR at all.  The backends learned to deal with the bitcast
/// idiom because that was the only way of expressing the notion of an atomic
/// float or vector store.  The long term plan is to teach each backend to
/// instruction select from the original atomic store, but as a migration
/// mechanism, we convert back to the old format which the backends understand.
/// Each backend will need individual work to recognize the new format.
StoreInst *AtomicExpandImpl::convertAtomicStoreToIntegerType(StoreInst *SI) {}
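
// The mirror image of the load-side sketch above: bitcast the value to iN and
// emit an integer atomic store to the same address. Illustrative only;
// metadata handling is omitted.
static StoreInst *sketchConvertAtomicStoreToInt(StoreInst *SI,
                                                IntegerType *NewTy) {
  IRBuilder<> Builder(SI);
  Value *IntVal = Builder.CreateBitCast(SI->getValueOperand(), NewTy);
  StoreInst *NewSI = Builder.CreateAlignedStore(IntVal, SI->getPointerOperand(),
                                                SI->getAlign());
  NewSI->setAtomic(SI->getOrdering(), SI->getSyncScopeID());
  NewSI->setVolatile(SI->isVolatile());
  SI->eraseFromParent();
  return NewSI;
}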

void AtomicExpandImpl::expandAtomicStore(StoreInst *SI) {}

static void createCmpXchgInstFun(IRBuilderBase &Builder, Value *Addr,
                                 Value *Loaded, Value *NewVal, Align AddrAlign,
                                 AtomicOrdering MemOpOrder, SyncScope::ID SSID,
                                 Value *&Success, Value *&NewLoaded) {}
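
// A sketch of what a CreateCmpXchgInstFun callback with the signature above
// typically does: emit one strong cmpxchg and unpack the {oldval, success}
// pair. The failure ordering is derived with
// AtomicCmpXchgInst::getStrongestFailureOrdering(); details may differ from
// the upstream helper.
static void sketchCmpXchgInstFun(IRBuilderBase &Builder, Value *Addr,
                                 Value *Loaded, Value *NewVal, Align AddrAlign,
                                 AtomicOrdering MemOpOrder, SyncScope::ID SSID,
                                 Value *&Success, Value *&NewLoaded) {
  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, Loaded, NewVal, AddrAlign, MemOpOrder,
      AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder), SSID);
  Success = Builder.CreateExtractValue(Pair, 1, "success");
  NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");
}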

bool AtomicExpandImpl::tryExpandAtomicRMW(AtomicRMWInst *AI) {}

namespace {

struct PartwordMaskValues {};

LLVM_ATTRIBUTE_UNUSED
raw_ostream &operator<<(raw_ostream &O, const PartwordMaskValues &PMV) {}

} // end anonymous namespace

/// This is a helper function which builds instructions to provide
/// values necessary for partword atomic operations. It takes an
/// incoming address, Addr, and ValueType, and constructs the address,
/// shift-amounts and masks needed to work with a larger value of size
/// WordSize.
///
/// AlignedAddr: Addr rounded down to a multiple of WordSize
///
/// ShiftAmt: Number of bits to right-shift a WordSize value loaded
///           from AlignedAddr for it to have the same value as if
///           ValueType had been loaded from Addr.
///
/// Mask: Value to mask with the value loaded from AlignedAddr to
///       include only the part that would've been loaded from Addr.
///
/// Inv_Mask: The inverse of Mask.
static PartwordMaskValues createMaskInstrs(IRBuilderBase &Builder,
                                           Instruction *I, Type *ValueType,
                                           Value *Addr, Align AddrAlign,
                                           unsigned MinWordSize) {}
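
// A hedged sketch of the computation described above for a little-endian
// target; local names mirror the PartwordMaskValues fields, the helper name
// is illustrative, and big-endian shift handling is omitted. Assumes the
// access is genuinely sub-word (ValueSize < MinWordSize).
static void sketchMaskInstrs(IRBuilderBase &Builder, Type *ValueType,
                             Value *Addr, unsigned MinWordSize,
                             const DataLayout &DL) {
  Type *WordType = Builder.getIntNTy(MinWordSize * 8);
  unsigned ValueSize = DL.getTypeStoreSize(ValueType);
  Type *IntPtrTy = DL.getIntPtrType(Addr->getType());

  Value *AddrInt = Builder.CreatePtrToInt(Addr, IntPtrTy);
  // PtrLSB: byte offset of Addr inside its containing aligned word.
  Value *PtrLSB = Builder.CreateAnd(AddrInt, MinWordSize - 1, "PtrLSB");
  // AlignedAddr: Addr rounded down to a multiple of MinWordSize.
  Value *AlignedAddr = Builder.CreateIntToPtr(
      Builder.CreateSub(AddrInt, PtrLSB), Addr->getType(), "AlignedAddr");
  // ShiftAmt: bit offset of the narrow value inside the wide word.
  Value *ShiftAmt = Builder.CreateShl(
      Builder.CreateZExtOrTrunc(PtrLSB, WordType), 3, "ShiftAmt");
  // Mask: ValueSize * 8 set bits shifted into position; Inv_Mask: the rest.
  Value *Mask = Builder.CreateShl(
      ConstantInt::get(WordType, (1ULL << (ValueSize * 8)) - 1), ShiftAmt,
      "Mask");
  Value *Inv_Mask = Builder.CreateNot(Mask, "Inv_Mask");
  (void)AlignedAddr;
  (void)Inv_Mask; // a real helper would return these in a PartwordMaskValues
}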

static Value *extractMaskedValue(IRBuilderBase &Builder, Value *WideWord,
                                 const PartwordMaskValues &PMV) {}

static Value *insertMaskedValue(IRBuilderBase &Builder, Value *WideWord,
                                Value *Updated, const PartwordMaskValues &PMV) {}
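
// Sketches of the two helpers above, under the same assumptions as the
// partword machinery: extraction shifts the wide word down and truncates;
// insertion zero-extends, shifts into place, and merges under the mask.
// Parameters stand in for the corresponding PartwordMaskValues fields.
static Value *sketchExtractMaskedValue(IRBuilderBase &Builder, Value *WideWord,
                                       Value *ShiftAmt, Type *ValueType) {
  Value *Shift = Builder.CreateLShr(WideWord, ShiftAmt, "shifted");
  return Builder.CreateTrunc(Shift, ValueType, "extracted");
}

static Value *sketchInsertMaskedValue(IRBuilderBase &Builder, Value *WideWord,
                                      Value *Updated, Value *ShiftAmt,
                                      Value *Inv_Mask, Type *WordType) {
  Value *Ext = Builder.CreateZExt(Updated, WordType, "extended");
  Value *Shifted = Builder.CreateShl(Ext, ShiftAmt, "shifted");
  Value *Cleared = Builder.CreateAnd(WideWord, Inv_Mask, "cleared");
  return Builder.CreateOr(Cleared, Shifted, "inserted");
}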

/// Emit IR to implement a masked version of a given atomicrmw
/// operation. (That is, only the bits under the Mask should be
/// affected by the operation)
static Value *performMaskedAtomicOp(AtomicRMWInst::BinOp Op,
                                    IRBuilderBase &Builder, Value *Loaded,
                                    Value *Shifted_Inc, Value *Inc,
                                    const PartwordMaskValues &PMV) {}
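
// A partial sketch of the masked-operation dispatch described above, showing
// two representative cases; the remaining operations (and, nand, min/max,
// floating-point ops, ...) need their own handling. Parameter names mirror
// the real signature, with Mask/Inv_Mask standing in for the PMV fields.
static Value *sketchPerformMaskedOp(AtomicRMWInst::BinOp Op,
                                    IRBuilderBase &Builder, Value *Loaded,
                                    Value *Shifted_Inc, Value *Mask,
                                    Value *Inv_Mask) {
  switch (Op) {
  case AtomicRMWInst::Xchg: {
    // Replace only the masked lane: clear it, then OR in the new bits.
    Value *Keep = Builder.CreateAnd(Loaded, Inv_Mask);
    return Builder.CreateOr(Keep, Shifted_Inc);
  }
  case AtomicRMWInst::Add: {
    // Add in place, then keep the bits outside the lane unchanged so any
    // carry out of the lane is discarded.
    Value *Sum = Builder.CreateAdd(Loaded, Shifted_Inc);
    Value *NewLane = Builder.CreateAnd(Sum, Mask);
    Value *Keep = Builder.CreateAnd(Loaded, Inv_Mask);
    return Builder.CreateOr(Keep, NewLane);
  }
  default:
    llvm_unreachable("other operations omitted from this sketch");
  }
}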

/// Expand a sub-word atomicrmw operation into an appropriate
/// word-sized operation.
///
/// It will create an LL/SC or cmpxchg loop, as appropriate, the same
/// way as a typical atomicrmw expansion. The only difference here is
/// that the operation inside the loop may operate on only a part of
/// the value.
void AtomicExpandImpl::expandPartwordAtomicRMW(
    AtomicRMWInst *AI, TargetLoweringBase::AtomicExpansionKind ExpansionKind) {}

/// Copy metadata that's safe to preserve when widening atomics.
static void copyMetadataForAtomic(Instruction &Dest,
                                  const Instruction &Source) {}

// Widen the bitwise atomicrmw (or/xor/and) to the minimum supported width.
AtomicRMWInst *AtomicExpandImpl::widenPartwordAtomicRMW(AtomicRMWInst *AI) {}
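
// A minimal sketch of the operand adjustment behind this widening, assuming
// ValOperand_Shifted is the zero-extended, shifted narrow operand and
// Inv_Mask comes from the partword mask computation (names illustrative):
static Value *sketchWidenBitwiseOperand(IRBuilderBase &Builder,
                                        AtomicRMWInst::BinOp Op,
                                        Value *ValOperand_Shifted,
                                        Value *Inv_Mask) {
  // or/xor: zero bits outside the lane already leave the neighbours alone.
  if (Op == AtomicRMWInst::Or || Op == AtomicRMWInst::Xor)
    return ValOperand_Shifted;
  // and: force bits outside the lane to 1 so they are preserved.
  assert(Op == AtomicRMWInst::And && "only bitwise ops are widened");
  return Builder.CreateOr(ValOperand_Shifted, Inv_Mask, "AndOperand");
}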

bool AtomicExpandImpl::expandPartwordCmpXchg(AtomicCmpXchgInst *CI) {}

void AtomicExpandImpl::expandAtomicOpToLLSC(
    Instruction *I, Type *ResultType, Value *Addr, Align AddrAlign,
    AtomicOrdering MemOpOrder,
    function_ref<Value *(IRBuilderBase &, Value *)> PerformOp) {}

void AtomicExpandImpl::expandAtomicRMWToMaskedIntrinsic(AtomicRMWInst *AI) {}

void AtomicExpandImpl::expandAtomicCmpXchgToMaskedIntrinsic(
    AtomicCmpXchgInst *CI) {}

Value *AtomicExpandImpl::insertRMWLLSCLoop(
    IRBuilderBase &Builder, Type *ResultTy, Value *Addr, Align AddrAlign,
    AtomicOrdering MemOpOrder,
    function_ref<Value *(IRBuilderBase &, Value *)> PerformOp) {}

/// Convert an atomic cmpxchg of a non-integral type to an integer cmpxchg of
/// the equivalent bitwidth.  We used to not support pointer cmpxchg in the
/// IR.  As a migration step, we convert back to what used to be the standard
/// way to represent a pointer cmpxchg so that we can update backends one by
/// one.
AtomicCmpXchgInst *
AtomicExpandImpl::convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI) {}

bool AtomicExpandImpl::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {}

bool AtomicExpandImpl::isIdempotentRMW(AtomicRMWInst *RMWI) {}

bool AtomicExpandImpl::simplifyIdempotentRMW(AtomicRMWInst *RMWI) {}
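
// A hedged sketch of the idempotence test: an atomicrmw whose operand cannot
// change the stored value behaves like an atomic load (e.g. `atomicrmw add
// ptr %p, i32 0` or `atomicrmw and ptr %p, i32 -1`). Only integer constants
// are considered here; the upstream check may cover more cases.
static bool sketchIsIdempotentRMW(AtomicRMWInst *RMWI) {
  auto *C = dyn_cast<ConstantInt>(RMWI->getValOperand());
  if (!C)
    return false;
  switch (RMWI->getOperation()) {
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
    return C->isZero();     // x op 0 == x
  case AtomicRMWInst::And:
    return C->isMinusOne(); // x & -1 == x
  default:
    return false;
  }
}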

Value *AtomicExpandImpl::insertRMWCmpXchgLoop(
    IRBuilderBase &Builder, Type *ResultTy, Value *Addr, Align AddrAlign,
    AtomicOrdering MemOpOrder, SyncScope::ID SSID,
    function_ref<Value *(IRBuilderBase &, Value *)> PerformOp,
    CreateCmpXchgInstFun CreateCmpXchg) {}
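
// For reference, the loop this routine builds has roughly the following
// shape (sketch; block and value names are illustrative):
//
//   entry:
//     %init_loaded = load atomic iN, ptr %addr
//     br label %loop
//   loop:
//     %loaded = phi iN [ %init_loaded, %entry ], [ %new_loaded, %loop ]
//     %new = <PerformOp applied to %loaded>
//     %pair = cmpxchg ptr %addr, iN %loaded, iN %new <orderings>
//     %new_loaded = extractvalue { iN, i1 } %pair, 0
//     %success = extractvalue { iN, i1 } %pair, 1
//     br i1 %success, label %atomicrmw.end, label %loop
//   atomicrmw.end: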

bool AtomicExpandImpl::tryExpandAtomicCmpXchg(AtomicCmpXchgInst *CI) {}

// Note: This function is exposed externally by AtomicExpandUtils.h
bool llvm::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI,
                                    CreateCmpXchgInstFun CreateCmpXchg) {}

// In order to use one of the sized library calls such as
// __atomic_fetch_add_4, the alignment must be sufficient, the size
// must be one of the potentially-specialized sizes, and the value
// type must actually exist in C on the target (otherwise, the
// function wouldn't be defined).
static bool canUseSizedAtomicCall(unsigned Size, Align Alignment,
                                  const DataLayout &DL) {}
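
// A hedged sketch of that gate: the sized __atomic_*_N entry points exist
// only for a few small power-of-two sizes, the access must be at least
// naturally aligned, and the width must be a legal integer for the target.
// The exact upstream predicate may differ.
static bool sketchCanUseSizedAtomicCall(unsigned Size, Align Alignment,
                                        const DataLayout &DL) {
  bool SupportedSize =
      Size == 1 || Size == 2 || Size == 4 || Size == 8 || Size == 16;
  return SupportedSize && Alignment >= Size && DL.fitsInLegalInteger(Size * 8);
}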

void AtomicExpandImpl::expandAtomicLoadToLibcall(LoadInst *I) {}

void AtomicExpandImpl::expandAtomicStoreToLibcall(StoreInst *I) {}

void AtomicExpandImpl::expandAtomicCASToLibcall(AtomicCmpXchgInst *I) {}

static ArrayRef<RTLIB::Libcall> GetRMWLibcall(AtomicRMWInst::BinOp Op) {}

void AtomicExpandImpl::expandAtomicRMWToLibcall(AtomicRMWInst *I) {}

// A helper routine for the above expandAtomic*ToLibcall functions.
//
// 'Libcalls' contains an array of enum values for the particular
// ATOMIC libcalls to be emitted. All of the other arguments besides
// 'I' are extracted from the Instruction subclass by the
// caller. Depending on the particular call, some will be null.
bool AtomicExpandImpl::expandAtomicOpToLibcall(
    Instruction *I, unsigned Size, Align Alignment, Value *PointerOperand,
    Value *ValueOperand, Value *CASExpected, AtomicOrdering Ordering,
    AtomicOrdering Ordering2, ArrayRef<RTLIB::Libcall> Libcalls) {}
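
// For reference, the generic (unsized) libatomic entry points that this kind
// of expansion falls back to have the following C prototypes; the sized
// __atomic_*_N variants pass and return values directly instead of through
// pointers:
//
//   void __atomic_load(size_t size, void *ptr, void *ret, int ordering);
//   void __atomic_store(size_t size, void *ptr, void *val, int ordering);
//   void __atomic_exchange(size_t size, void *ptr, void *val, void *ret,
//                          int ordering);
//   bool __atomic_compare_exchange(size_t size, void *ptr, void *expected,
//                                  void *desired, int success_order,
//                                  int failure_order);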