#include "ABIInfo.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGHLSLRuntime.h"
#include "CGObjCRuntime.h"
#include "CGOpenCLRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "PatternInit.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/OSLog.h"
#include "clang/AST/OperationKinds.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/FloatingPointMode.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsBPF.h"
#include "llvm/IR/IntrinsicsDirectX.h"
#include "llvm/IR/IntrinsicsHexagon.h"
#include "llvm/IR/IntrinsicsNVPTX.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/IntrinsicsR600.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/IR/IntrinsicsS390.h"
#include "llvm/IR/IntrinsicsVE.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/MatrixBuilder.h"
#include "llvm/IR/MemoryModelRelaxationAnnotations.h"
#include "llvm/Support/AMDGPUAddrSpace.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/ScopedPrinter.h"
#include "llvm/TargetParser/AArch64TargetParser.h"
#include "llvm/TargetParser/RISCVISAInfo.h"
#include "llvm/TargetParser/X86TargetParser.h"
#include <optional>
#include <sstream>
using namespace clang;
using namespace CodeGen;
using namespace llvm;
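// This file lowers calls to builtin functions (__builtin_*, the __sync_*/
// _Interlocked* families, and per-target vendor intrinsics) to LLVM IR:
// either direct instruction sequences or calls to llvm.* intrinsics.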
static void initializeAlloca(CodeGenFunction &CGF, AllocaInst *AI, Value *Size,
Align AlignmentInBytes) { … }
llvm::Constant *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
unsigned BuiltinID) { … }
static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
QualType T, llvm::IntegerType *IntType) { … }
static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
QualType T, llvm::Type *ResultType) { … }
static Address CheckAtomicAlignment(CodeGenFunction &CGF, const CallExpr *E) { … }
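// A sketch of the lowering the __sync_* helpers below perform: for
//   __sync_fetch_and_add(&x, 1);   // x is an int
// MakeBinaryAtomicValue emits roughly
//   %old = atomicrmw add ptr %x, i32 1 seq_cst
// after EmitToInt has coerced the value operand to the pointee's integer
// width; EmitFromInt converts the old value back to the source type.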
static Value *MakeBinaryAtomicValue(
CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E,
AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) { … }
static Value *EmitNontemporalStore(CodeGenFunction &CGF, const CallExpr *E) { … }
static Value *EmitNontemporalLoad(CodeGenFunction &CGF, const CallExpr *E) { … }
static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
llvm::AtomicRMWInst::BinOp Kind,
const CallExpr *E) { … }
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
llvm::AtomicRMWInst::BinOp Kind,
const CallExpr *E,
Instruction::BinaryOps Op,
bool Invert = false) { … }
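// The compare-and-swap forms, e.g.
//   __sync_bool_compare_and_swap(&x, old, new)
// lower to a 'cmpxchg' followed by an extractvalue: index 1 (the success
// flag) when ReturnBool is set, index 0 (the previous value) for the
// __sync_val_* flavor:
//   %pair = cmpxchg ptr %x, i32 %old, i32 %new seq_cst seq_cst
//   %ok   = extractvalue { i32, i1 } %pair, 1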
static Value *MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E,
bool ReturnBool) { … }
static
Value *EmitAtomicCmpXchgForMSIntrin(CodeGenFunction &CGF, const CallExpr *E,
AtomicOrdering SuccessOrdering = AtomicOrdering::SequentiallyConsistent) { … }
static Value *EmitAtomicCmpXchg128ForMSIntrin(CodeGenFunction &CGF,
const CallExpr *E,
AtomicOrdering SuccessOrdering) { … }
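// MSVC's _InterlockedIncrement/_InterlockedDecrement return the *new* value,
// while atomicrmw yields the old one, so the two helpers below emit the
// atomicrmw add/sub and then add or subtract 1 from its result.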
static Value *EmitAtomicIncrementValue(CodeGenFunction &CGF, const CallExpr *E,
AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) { … }
static Value *EmitAtomicDecrementValue(
CodeGenFunction &CGF, const CallExpr *E,
AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) { … }
static Value *EmitISOVolatileLoad(CodeGenFunction &CGF, const CallExpr *E) { … }
static Value *EmitISOVolatileStore(CodeGenFunction &CGF, const CallExpr *E) { … }
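// The emit*MaybeConstrainedFPBuiltin helpers choose between a plain FP
// intrinsic and its llvm.experimental.constrained.* counterpart. E.g. a
// sqrt(x) call normally becomes
//   call double @llvm.sqrt.f64(double %x)
// but when the builder is in FP-constrained mode (strict exception
// semantics), it becomes roughly
//   call double @llvm.experimental.constrained.sqrt.f64(double %x,
//        metadata !"round.dynamic", metadata !"fpexcept.strict")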
static Value *emitUnaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
const CallExpr *E, unsigned IntrinsicID,
unsigned ConstrainedIntrinsicID) { … }
static Value *emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
const CallExpr *E, unsigned IntrinsicID,
unsigned ConstrainedIntrinsicID) { … }
static Value *emitBinaryExpMaybeConstrainedFPBuiltin(
CodeGenFunction &CGF, const CallExpr *E, llvm::Intrinsic::ID IntrinsicID,
llvm::Intrinsic::ID ConstrainedIntrinsicID) { … }
static Value *emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
const CallExpr *E, unsigned IntrinsicID,
unsigned ConstrainedIntrinsicID) { … }
static Value *emitCallMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
unsigned IntrinsicID,
unsigned ConstrainedIntrinsicID,
llvm::Type *Ty,
ArrayRef<Value *> Args) { … }
template <unsigned N>
static Value *emitBuiltinWithOneOverloadedType(CodeGenFunction &CGF,
const CallExpr *E,
unsigned IntrinsicID,
llvm::StringRef Name = "") { … }
static Value *emitFPIntBuiltin(CodeGenFunction &CGF,
const CallExpr *E,
unsigned IntrinsicID) { … }
static Value *
emitMaybeConstrainedFPToIntRoundBuiltin(CodeGenFunction &CGF, const CallExpr *E,
unsigned IntrinsicID,
unsigned ConstrainedIntrinsicID) { … }
static Value *emitFrexpBuiltin(CodeGenFunction &CGF, const CallExpr *E,
llvm::Intrinsic::ID IntrinsicID) { … }
static Value *EmitFAbs(CodeGenFunction &CGF, Value *V) { … }
static Value *EmitSignBit(CodeGenFunction &CGF, Value *V) { … }
static bool HasNoIndirectArgumentsOrResults(CGFunctionInfo const &FnInfo) { … }
static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *FD,
const CallExpr *E, llvm::Constant *calleeValue) { … }
static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF,
const llvm::Intrinsic::ID IntrinsicID,
llvm::Value *X, llvm::Value *Y,
llvm::Value *&Carry) { … }
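// emitRangedBuiltin calls a zero-argument intrinsic and attaches !range
// metadata [low, high) to the result, so later passes can fold comparisons
// against values the builtin can never produce.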
static Value *emitRangedBuiltin(CodeGenFunction &CGF, unsigned IntrinsicID,
int low, int high) { … }
namespace {
struct WidthAndSignedness { … };
}
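// Example: for __builtin_add_overflow with 'int' and 'unsigned' operands the
// operand types map to {32, signed} and {32, unsigned}; the encompassing type
// is then {33, signed}, since a signed type needs one extra bit to represent
// every unsigned 32-bit value (LLVM computes in i33 directly).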
static WidthAndSignedness
getIntegerWidthAndSignedness(const clang::ASTContext &context,
const clang::QualType Type) { … }
static struct WidthAndSignedness
EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) { … }
Value *CodeGenFunction::EmitVAStartEnd(Value *ArgValue, bool IsStart) { … }
static bool areBOSTypesCompatible(int From, int To) { … }
static llvm::Value *
getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType) { … }
llvm::Value *
CodeGenFunction::evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
llvm::IntegerType *ResType,
llvm::Value *EmittedE,
bool IsDynamic) { … }
const FieldDecl *CodeGenFunction::FindFlexibleArrayMemberFieldAndOffset(
ASTContext &Ctx, const RecordDecl *RD, const FieldDecl *FAMDecl,
uint64_t &Offset) { … }
static unsigned CountCountedByAttrs(const RecordDecl *RD) { … }
llvm::Value *
CodeGenFunction::emitFlexibleArrayMemberSize(const Expr *E, unsigned Type,
llvm::IntegerType *ResType) { … }
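// 'Type' follows the __builtin_object_size encoding: bit 1 set means return
// the minimum remaining bytes rather than the maximum, and bit 0 set means
// measure only the closest surrounding subobject. When the size cannot be
// determined, types 0 and 1 return (size_t)-1 while types 2 and 3 return 0,
// which is what getDefaultBuiltinObjectSizeResult encodes.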
llvm::Value *
CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type,
llvm::IntegerType *ResType,
llvm::Value *EmittedE, bool IsDynamic) { … }
namespace {
struct BitTest { … };
}
BitTest BitTest::decodeBitTestBuiltin(unsigned BuiltinID) { … }
static char bitActionToX86BTCode(BitTest::ActionKind A) { … }
static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF,
BitTest BT,
const CallExpr *E, Value *BitBase,
Value *BitPos) { … }
static llvm::AtomicOrdering
getBitTestAtomicOrdering(BitTest::InterlockingKind I) { … }
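// e.g. _interlockedbittestandset(&v, 3) atomically tests and sets bit 3 of
// *v. On x86 this is emitted as inline asm from the 'bt/bts/btr/btc' family
// (with a lock prefix for the interlocked forms); other targets get an
// atomicrmw or/and/xor plus an explicit shift-and-mask of the old value.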
static llvm::Value *EmitBitTestIntrinsic(CodeGenFunction &CGF,
unsigned BuiltinID,
const CallExpr *E) { … }
static llvm::Value *emitPPCLoadReserveIntrinsic(CodeGenFunction &CGF,
unsigned BuiltinID,
const CallExpr *E) { … }
namespace {
enum class MSVCSetJmpKind { … };
}
static RValue EmitMSVCRTSetJmp(CodeGenFunction &CGF, MSVCSetJmpKind SJKind,
const CallExpr *E) { … }
enum class CodeGenFunction::MSVCIntrin { … };
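// The translate*ToMsvcIntrin helpers map per-target builtin IDs onto the
// shared MSVCIntrin enumeration so EmitMSVCBuiltinExpr can lower the
// _Interlocked*, _BitScan*, __fastfail, etc. families once for ARM, AArch64,
// and X86.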
static std::optional<CodeGenFunction::MSVCIntrin>
translateArmToMsvcIntrin(unsigned BuiltinID) { … }
static std::optional<CodeGenFunction::MSVCIntrin>
translateAarch64ToMsvcIntrin(unsigned BuiltinID) { … }
static std::optional<CodeGenFunction::MSVCIntrin>
translateX86ToMsvcIntrin(unsigned BuiltinID) { … }
Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID,
const CallExpr *E) { … }
namespace {
struct CallObjCArcUse final : EHScopeStack::Cleanup { … };
}
Value *CodeGenFunction::EmitCheckedArgForBuiltin(const Expr *E,
BuiltinCheckKind Kind) { … }
Value *CodeGenFunction::EmitCheckedArgForAssume(const Expr *E) { … }
static Value *EmitAbs(CodeGenFunction &CGF, Value *ArgValue, bool HasNSW) { … }
static Value *EmitOverflowCheckedAbs(CodeGenFunction &CGF, const CallExpr *E,
bool SanitizeOverflow) { … }
static CanQualType getOSLogArgType(ASTContext &C, int Size) { … }
llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction(
const analyze_os_log::OSLogBufferLayout &Layout,
CharUnits BufferAlignment) { … }
RValue CodeGenFunction::emitBuiltinOSLogFormat(const CallExpr &E) { … }
static bool isSpecialUnsignedMultiplySignedResult(
unsigned BuiltinID, WidthAndSignedness Op1Info, WidthAndSignedness Op2Info,
WidthAndSignedness ResultInfo) { … }
static RValue EmitCheckedUnsignedMultiplySignedResult(
CodeGenFunction &CGF, const clang::Expr *Op1, WidthAndSignedness Op1Info,
const clang::Expr *Op2, WidthAndSignedness Op2Info,
const clang::Expr *ResultArg, QualType ResultQTy,
WidthAndSignedness ResultInfo) { … }
static bool isSpecialMixedSignMultiply(unsigned BuiltinID,
WidthAndSignedness Op1Info,
WidthAndSignedness Op2Info,
WidthAndSignedness ResultInfo) { … }
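// Mixed-sign __builtin_mul_overflow (one signed and one unsigned operand)
// has no matching llvm.smul/umul.with.overflow form, so the emitter below
// multiplies the absolute value of the signed operand using
// umul.with.overflow and then reapplies the sign, folding any negation
// overflow into the overflow flag.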
static RValue
EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1,
WidthAndSignedness Op1Info, const clang::Expr *Op2,
WidthAndSignedness Op2Info,
const clang::Expr *ResultArg, QualType ResultQTy,
WidthAndSignedness ResultInfo) { … }
static bool
TypeRequiresBuiltinLaunderImp(const ASTContext &Ctx, QualType Ty,
llvm::SmallPtrSetImpl<const Decl *> &Seen) { … }
static bool TypeRequiresBuiltinLaunder(CodeGenModule &CGM, QualType Ty) { … }
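// Rotates map directly onto funnel shifts with both value operands equal:
//   __builtin_rotateleft32(x, n)
//     -> call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 %n)
// and the rotate-right forms use @llvm.fshr.* the same way.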
RValue CodeGenFunction::emitRotate(const CallExpr *E, bool IsRotateRight) { … }
static unsigned mutateLongDoubleBuiltin(unsigned BuiltinID) { … }
static Value *tryUseTestFPKind(CodeGenFunction &CGF, unsigned BuiltinID,
Value *V) { … }
static RValue EmitHipStdParUnsupportedBuiltin(CodeGenFunction *CGF,
const FunctionDecl *FD) { … }
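// EmitBuiltinExpr is the central dispatcher: it first tries to fold the call
// to a constant, then handles target-independent builtins inline in a large
// switch, and finally either forwards to EmitTargetBuiltinExpr or emits an
// ordinary library call for builtins with a libc/libm equivalent.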
RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
const CallExpr *E,
ReturnValueSlot ReturnValue) { … }
static Value *EmitTargetArchBuiltinExpr(CodeGenFunction *CGF,
unsigned BuiltinID, const CallExpr *E,
ReturnValueSlot ReturnValue,
llvm::Triple::ArchType Arch) { … }
Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
const CallExpr *E,
ReturnValueSlot ReturnValue) { … }
static llvm::FixedVectorType *GetNeonType(CodeGenFunction *CGF,
NeonTypeFlags TypeFlags,
bool HasLegalHalfType = true,
bool V1Ty = false,
bool AllowBFloatArgsAndRet = true) { … }
static llvm::VectorType *GetFloatNeonType(CodeGenFunction *CGF,
NeonTypeFlags IntTypeFlags) { … }
Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C,
const ElementCount &Count) { … }
Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) { … }
Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
const char *name,
unsigned shift, bool rightshift) { … }
Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty,
bool neg) { … }
Value *CodeGenFunction::EmitNeonRShiftImm(Value *Vec, Value *Shift,
llvm::Type *Ty, bool usgn,
const char *name) { … }
enum { … };
namespace {
struct ARMVectorIntrinsicInfo { … };
}
#define NEONMAP0 …
#define NEONMAP1 …
#define NEONMAP2 …
static const ARMVectorIntrinsicInfo ARMSIMDIntrinsicMap[] = …;
static const ARMVectorIntrinsicInfo AArch64SIMDIntrinsicMap[] = …;
static const ARMVectorIntrinsicInfo AArch64SISDIntrinsicMap[] = …;
static const std::pair<unsigned, unsigned> NEONEquivalentIntrinsicMap[] = …;
#undef NEONMAP0
#undef NEONMAP1
#undef NEONMAP2
#define SVEMAP1 …
#define SVEMAP2 …
static const ARMVectorIntrinsicInfo AArch64SVEIntrinsicMap[] = …;
#undef SVEMAP1
#undef SVEMAP2
#define SMEMAP1 …
#define SMEMAP2 …
static const ARMVectorIntrinsicInfo AArch64SMEIntrinsicMap[] = …;
#undef SMEMAP1
#undef SMEMAP2
static bool NEONSIMDIntrinsicsProvenSorted = …;
static bool AArch64SIMDIntrinsicsProvenSorted = …;
static bool AArch64SISDIntrinsicsProvenSorted = …;
static bool AArch64SVEIntrinsicsProvenSorted = …;
static bool AArch64SMEIntrinsicsProvenSorted = …;
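// The intrinsic tables above are kept sorted by BuiltinID so lookups can use
// binary search; the *ProvenSorted flags let asserts builds verify each
// table's sortedness once instead of on every query.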
static const ARMVectorIntrinsicInfo *
findARMVectorIntrinsicInMap(ArrayRef<ARMVectorIntrinsicInfo> IntrinsicMap,
unsigned BuiltinID, bool &MapProvenSorted) { … }
Function *CodeGenFunction::LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
unsigned Modifier,
llvm::Type *ArgType,
const CallExpr *E) { … }
static Value *EmitCommonNeonSISDBuiltinExpr(
CodeGenFunction &CGF, const ARMVectorIntrinsicInfo &SISDInfo,
SmallVectorImpl<Value *> &Ops, const CallExpr *E) { … }
Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
unsigned BuiltinID, unsigned LLVMIntrinsic, unsigned AltLLVMIntrinsic,
const char *NameHint, unsigned Modifier, const CallExpr *E,
SmallVectorImpl<llvm::Value *> &Ops, Address PtrOp0, Address PtrOp1,
llvm::Triple::ArchType Arch) { … }
Value *CodeGenFunction::EmitAArch64CompareBuiltinExpr(
Value *Op, llvm::Type *Ty, const CmpInst::Predicate Fp,
const CmpInst::Predicate Ip, const Twine &Name) { … }
static Value *packTBLDVectorList(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
Value *ExtOp, Value *IndexOp,
llvm::Type *ResTy, unsigned IntID,
const char *Name) { … }
Value *CodeGenFunction::GetValueForARMHint(unsigned BuiltinID) { … }
enum SpecialRegisterAccessKind { … };
static Value *EmitAMDGCNBallotForExec(CodeGenFunction &CGF, const CallExpr *E,
llvm::Type *RegisterType,
llvm::Type *ValueType, bool isExecHi) { … }
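// e.g. __builtin_arm_rsr64("cntvct_el0") reads a system register: the helper
// below wraps the register name in metadata and calls llvm.read_register (or
// llvm.write_register for the *_wsr forms) at the requested value type.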
static Value *EmitSpecialRegisterBuiltin(CodeGenFunction &CGF,
const CallExpr *E,
llvm::Type *RegisterType,
llvm::Type *ValueType,
SpecialRegisterAccessKind AccessKind,
StringRef SysReg = "") { … }
static bool HasExtraNeonArgument(unsigned BuiltinID) { … }
Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
const CallExpr *E,
ReturnValueSlot ReturnValue,
llvm::Triple::ArchType Arch) { … }
template<typename Integer>
static Integer GetIntegerConstantValue(const Expr *E, ASTContext &Context) { … }
static llvm::Value *SignOrZeroExtend(CGBuilderTy &Builder, llvm::Value *V,
llvm::Type *T, bool Unsigned) { … }
static llvm::Value *MVEImmediateShr(CGBuilderTy &Builder, llvm::Value *V,
uint32_t Shift, bool Unsigned) { … }
static llvm::Value *ARMMVEVectorSplat(CGBuilderTy &Builder, llvm::Value *V) { … }
static llvm::Value *ARMMVEVectorReinterpret(CGBuilderTy &Builder,
CodeGenFunction *CGF,
llvm::Value *V,
llvm::Type *DestType) { … }
static llvm::Value *VectorUnzip(CGBuilderTy &Builder, llvm::Value *V, bool Odd) { … }
static llvm::Value *VectorZip(CGBuilderTy &Builder, llvm::Value *V0,
llvm::Value *V1) { … }
template<unsigned HighBit, unsigned OtherBits>
static llvm::Value *ARMMVEConstantSplat(CGBuilderTy &Builder, llvm::Type *VT) { … }
static llvm::Value *ARMMVEVectorElementReverse(CGBuilderTy &Builder,
llvm::Value *V,
unsigned ReverseWidth) { … }
Value *CodeGenFunction::EmitARMMVEBuiltinExpr(unsigned BuiltinID,
const CallExpr *E,
ReturnValueSlot ReturnValue,
llvm::Triple::ArchType Arch) { … }
Value *CodeGenFunction::EmitARMCDEBuiltinExpr(unsigned BuiltinID,
const CallExpr *E,
ReturnValueSlot ReturnValue,
llvm::Triple::ArchType Arch) { … }
static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID,
const CallExpr *E,
SmallVectorImpl<Value *> &Ops,
llvm::Triple::ArchType Arch) { … }
Value *CodeGenFunction::vectorWrapScalar16(Value *Op) { … }
llvm::Type *CodeGenFunction::SVEBuiltinMemEltTy(const SVETypeFlags &TypeFlags) { … }
llvm::Type *CodeGenFunction::getEltType(const SVETypeFlags &TypeFlags) { … }
llvm::ScalableVectorType *
CodeGenFunction::getSVEPredType(const SVETypeFlags &TypeFlags) { … }
llvm::ScalableVectorType *
CodeGenFunction::getSVEType(const SVETypeFlags &TypeFlags) { … }
llvm::Value *
CodeGenFunction::EmitSVEAllTruePred(const SVETypeFlags &TypeFlags) { … }
constexpr unsigned SVEBitsPerBlock = …;
static llvm::ScalableVectorType *getSVEVectorForElementType(llvm::Type *EltTy) { … }
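// Every SVE predicate value is carried around as <vscale x 16 x i1>
// (svbool_t); EmitSVEPredicateCast narrows or widens that container to the
// predicate type a particular intrinsic expects via the
// llvm.aarch64.sve.convert.{to,from}.svbool intrinsics.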
Value *CodeGenFunction::EmitSVEPredicateCast(Value *Pred,
llvm::ScalableVectorType *VTy) { … }
Value *CodeGenFunction::EmitSVEPredicateTupleCast(Value *PredTuple,
llvm::StructType *Ty) { … }
Value *CodeGenFunction::EmitSVEGatherLoad(const SVETypeFlags &TypeFlags,
SmallVectorImpl<Value *> &Ops,
unsigned IntID) { … }
Value *CodeGenFunction::EmitSVEScatterStore(const SVETypeFlags &TypeFlags,
SmallVectorImpl<Value *> &Ops,
unsigned IntID) { … }
Value *CodeGenFunction::EmitSVEGatherPrefetch(const SVETypeFlags &TypeFlags,
SmallVectorImpl<Value *> &Ops,
unsigned IntID) { … }
Value *CodeGenFunction::EmitSVEStructLoad(const SVETypeFlags &TypeFlags,
SmallVectorImpl<Value*> &Ops,
unsigned IntID) { … }
Value *CodeGenFunction::EmitSVEStructStore(const SVETypeFlags &TypeFlags,
SmallVectorImpl<Value*> &Ops,
unsigned IntID) { … }
Value *CodeGenFunction::EmitSVEPMull(const SVETypeFlags &TypeFlags,
SmallVectorImpl<Value *> &Ops,
unsigned BuiltinID) { … }
Value *CodeGenFunction::EmitSVEMovl(const SVETypeFlags &TypeFlags,
ArrayRef<Value *> Ops, unsigned BuiltinID) { … }
Value *CodeGenFunction::EmitSVEPrefetchLoad(const SVETypeFlags &TypeFlags,
SmallVectorImpl<Value *> &Ops,
unsigned BuiltinID) { … }
Value *CodeGenFunction::EmitSVEMaskedLoad(const CallExpr *E,
llvm::Type *ReturnTy,
SmallVectorImpl<Value *> &Ops,
unsigned IntrinsicID,
bool IsZExtReturn) { … }
Value *CodeGenFunction::EmitSVEMaskedStore(const CallExpr *E,
SmallVectorImpl<Value *> &Ops,
unsigned IntrinsicID) { … }
Value *CodeGenFunction::EmitSMELd1St1(const SVETypeFlags &TypeFlags,
SmallVectorImpl<Value *> &Ops,
unsigned IntID) { … }
Value *CodeGenFunction::EmitSMEReadWrite(const SVETypeFlags &TypeFlags,
SmallVectorImpl<Value *> &Ops,
unsigned IntID) { … }
Value *CodeGenFunction::EmitSMEZero(const SVETypeFlags &TypeFlags,
SmallVectorImpl<Value *> &Ops,
unsigned IntID) { … }
Value *CodeGenFunction::EmitSMELdrStr(const SVETypeFlags &TypeFlags,
SmallVectorImpl<Value *> &Ops,
unsigned IntID) { … }
Value *CodeGenFunction::EmitSVEDupX(Value *Scalar, llvm::Type *Ty) { … }
Value *CodeGenFunction::EmitSVEDupX(Value* Scalar) { … }
Value *CodeGenFunction::EmitSVEReinterpret(Value *Val, llvm::Type *Ty) { … }
static void InsertExplicitZeroOperand(CGBuilderTy &Builder, llvm::Type *Ty,
SmallVectorImpl<Value *> &Ops) { … }
static void InsertExplicitUndefOperand(CGBuilderTy &Builder, llvm::Type *Ty,
SmallVectorImpl<Value *> &Ops) { … }
SmallVector<llvm::Type *, 2>
CodeGenFunction::getSVEOverloadTypes(const SVETypeFlags &TypeFlags,
llvm::Type *ResultType,
ArrayRef<Value *> Ops) { … }
Value *CodeGenFunction::EmitSVETupleSetOrGet(const SVETypeFlags &TypeFlags,
ArrayRef<Value *> Ops) { … }
Value *CodeGenFunction::EmitSVETupleCreate(const SVETypeFlags &TypeFlags,
llvm::Type *Ty,
ArrayRef<Value *> Ops) { … }
void CodeGenFunction::GetAArch64SVEProcessedOperands(
unsigned BuiltinID, const CallExpr *E, SmallVectorImpl<Value *> &Ops,
SVETypeFlags TypeFlags) { … }
Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) { … }
static void swapCommutativeSMEOperands(unsigned BuiltinID,
SmallVectorImpl<Value *> &Ops) { … }
Value *CodeGenFunction::EmitAArch64SMEBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) { … }
Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
const CallExpr *E,
llvm::Triple::ArchType Arch) { … }
Value *CodeGenFunction::EmitBPFBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) { … }
llvm::Value *CodeGenFunction::
BuildVector(ArrayRef<llvm::Value*> Ops) { … }
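// Common pattern for the AVX-512 masked builtins below: getMaskVecValue
// bitcasts the iN mask argument to <N x i1> (extracting the low elements when
// fewer than 8 are needed), and EmitX86Select then emits
//   select <N x i1> %mask, <N x T> %op, <N x T> %passthru
// which instruction selection folds into the masked machine instruction.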
static Value *getMaskVecValue(CodeGenFunction &CGF, Value *Mask,
unsigned NumElts) { … }
static Value *EmitX86MaskedStore(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
Align Alignment) { … }
static Value *EmitX86MaskedLoad(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
Align Alignment) { … }
static Value *EmitX86ExpandLoad(CodeGenFunction &CGF,
ArrayRef<Value *> Ops) { … }
static Value *EmitX86CompressExpand(CodeGenFunction &CGF,
ArrayRef<Value *> Ops,
bool IsCompress) { … }
static Value *EmitX86CompressStore(CodeGenFunction &CGF,
ArrayRef<Value *> Ops) { … }
static Value *EmitX86MaskLogic(CodeGenFunction &CGF, Instruction::BinaryOps Opc,
ArrayRef<Value *> Ops,
bool InvertLHS = false) { … }
static Value *EmitX86FunnelShift(CodeGenFunction &CGF, Value *Op0, Value *Op1,
Value *Amt, bool IsRight) { … }
static Value *EmitX86vpcom(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
bool IsSigned) { … }
static Value *EmitX86Select(CodeGenFunction &CGF,
Value *Mask, Value *Op0, Value *Op1) { … }
static Value *EmitX86ScalarSelect(CodeGenFunction &CGF,
Value *Mask, Value *Op0, Value *Op1) { … }
static Value *EmitX86MaskedCompareResult(CodeGenFunction &CGF, Value *Cmp,
unsigned NumElts, Value *MaskIn) { … }
static Value *EmitX86MaskedCompare(CodeGenFunction &CGF, unsigned CC,
bool Signed, ArrayRef<Value *> Ops) { … }
static Value *EmitX86ConvertToMask(CodeGenFunction &CGF, Value *In) { … }
static Value *EmitX86ConvertIntToFp(CodeGenFunction &CGF, const CallExpr *E,
ArrayRef<Value *> Ops, bool IsSigned) { … }
static Value *EmitX86FMAExpr(CodeGenFunction &CGF, const CallExpr *E,
ArrayRef<Value *> Ops, unsigned BuiltinID,
bool IsAddSub) { … }
static Value *EmitScalarFMAExpr(CodeGenFunction &CGF, const CallExpr *E,
MutableArrayRef<Value *> Ops, Value *Upper,
bool ZeroMask = false, unsigned PTIdx = 0,
bool NegAcc = false) { … }
static Value *EmitX86Muldq(CodeGenFunction &CGF, bool IsSigned,
ArrayRef<Value *> Ops) { … }
static Value *EmitX86Ternlog(CodeGenFunction &CGF, bool ZeroMask,
ArrayRef<Value *> Ops) { … }
static Value *EmitX86SExtMask(CodeGenFunction &CGF, Value *Op,
llvm::Type *DstTy) { … }
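// __builtin_cpu_is / __builtin_cpu_supports lower to loads from the
// __cpu_model and __cpu_features2 globals that __cpu_indicator_init
// (compiler-rt/libgcc) fills in at startup, followed by bit tests against the
// requested feature mask; the AArch64 and RISC-V variants below read the
// analogous __aarch64_cpu_features and __riscv_feature_bits structures.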
Value *CodeGenFunction::EmitX86CpuIs(const CallExpr *E) { … }
static Value *EmitX86CvtF16ToFloatExpr(CodeGenFunction &CGF,
ArrayRef<Value *> Ops,
llvm::Type *DstTy) { … }
Value *CodeGenFunction::EmitX86CpuIs(StringRef CPUStr) { … }
Value *CodeGenFunction::EmitX86CpuSupports(const CallExpr *E) { … }
Value *CodeGenFunction::EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs) { … }
llvm::Value *
CodeGenFunction::EmitX86CpuSupports(std::array<uint32_t, 4> FeatureMask) { … }
Value *CodeGenFunction::EmitAArch64CpuInit() { … }
Value *CodeGenFunction::EmitRISCVCpuInit() { … }
Value *CodeGenFunction::EmitX86CpuInit() { … }
Value *CodeGenFunction::EmitAArch64CpuSupports(const CallExpr *E) { … }
llvm::Value *
CodeGenFunction::EmitAArch64CpuSupports(ArrayRef<StringRef> FeaturesStrs) { … }
Value *CodeGenFunction::EmitRISCVCpuSupports(const CallExpr *E) { … }
static Value *loadRISCVFeatureBits(unsigned Index, CGBuilderTy &Builder,
CodeGenModule &CGM) { … }
Value *CodeGenFunction::EmitRISCVCpuSupports(ArrayRef<StringRef> FeaturesStrs) { … }
Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
const CallExpr *E) { … }
Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) { … }
Value *EmitAMDGPUDispatchPtr(CodeGenFunction &CGF,
const CallExpr *E = nullptr) { … }
Value *EmitAMDGPUImplicitArgPtr(CodeGenFunction &CGF) { … }
Value *EmitAMDGPUWorkGroupSize(CodeGenFunction &CGF, unsigned Index) { … }
Value *EmitAMDGPUGridSize(CodeGenFunction &CGF, unsigned Index) { … }
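// ProcessOrderScopeAMDGCN translates the integer memory-order and
// synchronization-scope operands of AMDGPU builtins (e.g.
// __builtin_amdgcn_fence) into an llvm::AtomicOrdering plus a SyncScope::ID
// such as "workgroup" or "agent", which the emitters below attach to the
// fences and atomics they create.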
void CodeGenFunction::ProcessOrderScopeAMDGCN(Value *Order, Value *Scope,
llvm::AtomicOrdering &AO,
llvm::SyncScope::ID &SSID) { … }
llvm::Value *CodeGenFunction::EmitScalarOrConstFoldImmArg(unsigned ICEArguments,
unsigned Idx,
const CallExpr *E) { … }
static Intrinsic::ID getDotProductIntrinsic(CGHLSLRuntime &RT, QualType QT) { … }
Value *CodeGenFunction::EmitHLSLBuiltinExpr(unsigned BuiltinID,
const CallExpr *E,
ReturnValueSlot ReturnValue) { … }
void CodeGenFunction::AddAMDGPUFenceAddressSpaceMMRA(llvm::Instruction *Inst,
const CallExpr *E) { … }
Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) { … }
static Value *EmitSystemZIntrinsicWithCC(CodeGenFunction &CGF,
unsigned IntrinsicID,
const CallExpr *E) { … }
Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) { … }
struct NVPTXMmaLdstInfo { … };
#define MMA_INTR …
#define MMA_LDST …
static NVPTXMmaLdstInfo getNVPTXMmaLdstInfo(unsigned BuiltinID) { … }
#undef MMA_LDST
#undef MMA_INTR
struct NVPTXMmaInfo { … };
static NVPTXMmaInfo getNVPTXMmaInfo(unsigned BuiltinID) { … }
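// The NVPTXMma* tables above map each __hmma_*/__imma_*/__dmma_* builtin to
// the matching llvm.nvvm.wmma.* intrinsic, keyed roughly by row- vs.
// column-major layout, and record how many fragment values the operation
// loads, stores, or accumulates.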
static Value *MakeLdgLdu(unsigned IntrinsicID, CodeGenFunction &CGF,
const CallExpr *E) { … }
static Value *MakeScopedAtomic(unsigned IntrinsicID, CodeGenFunction &CGF,
const CallExpr *E) { … }
static Value *MakeCpAsync(unsigned IntrinsicID, unsigned IntrinsicIDS,
CodeGenFunction &CGF, const CallExpr *E,
int SrcSize) { … }
static Value *MakeHalfType(unsigned IntrinsicID, unsigned BuiltinID,
const CallExpr *E, CodeGenFunction &CGF) { … }
Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) { … }
struct BuiltinAlignArgs { … };
RValue CodeGenFunction::EmitBuiltinIsAligned(const CallExpr *E) { … }
RValue CodeGenFunction::EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp) { … }
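// A sketch of what the alignment builtins above compute:
//   __builtin_is_aligned(p, 16) -> (uintptr & 15) == 0
//   __builtin_align_up(p, 16)   -> (uintptr + 15) & ~15
// For pointer arguments the final mask is applied via llvm.ptrmask rather
// than an inttoptr round-trip, preserving pointer provenance.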
Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) { … }
static std::pair<Intrinsic::ID, unsigned>
getIntrinsicForHexagonNonClangBuiltin(unsigned BuiltinID) { … }
Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) { … }
Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
const CallExpr *E,
ReturnValueSlot ReturnValue) { … }