//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Builtin calls as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "ABIInfo.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGHLSLRuntime.h"
#include "CGObjCRuntime.h"
#include "CGOpenCLRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "PatternInit.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/OSLog.h"
#include "clang/AST/OperationKinds.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/FloatingPointMode.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsBPF.h"
#include "llvm/IR/IntrinsicsDirectX.h"
#include "llvm/IR/IntrinsicsHexagon.h"
#include "llvm/IR/IntrinsicsNVPTX.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/IntrinsicsR600.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/IR/IntrinsicsS390.h"
#include "llvm/IR/IntrinsicsVE.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/MatrixBuilder.h"
#include "llvm/IR/MemoryModelRelaxationAnnotations.h"
#include "llvm/Support/AMDGPUAddrSpace.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/ScopedPrinter.h"
#include "llvm/TargetParser/AArch64TargetParser.h"
#include "llvm/TargetParser/RISCVISAInfo.h"
#include "llvm/TargetParser/X86TargetParser.h"
#include <optional>
#include <sstream>

using namespace clang;
using namespace CodeGen;
using namespace llvm;

static void initializeAlloca(CodeGenFunction &CGF, AllocaInst *AI, Value *Size,
                             Align AlignmentInBytes) {}

/// getBuiltinLibFunction - Given a builtin id for a function like
/// "__builtin_fabsf", return a Function* for "fabsf".
llvm::Constant *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
                                                     unsigned BuiltinID) {}

/// Emit the conversions required to turn the given value into an
/// integer of the given size.
static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
                        QualType T, llvm::IntegerType *IntType) {}

static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
                          QualType T, llvm::Type *ResultType) {}

static Address CheckAtomicAlignment(CodeGenFunction &CGF, const CallExpr *E) {}

/// Utility to insert an atomic instruction based on Intrinsic::ID
/// and the expression node.
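/// For example, given "int x", __sync_fetch_and_add(&x, 1) is emitted as
///   %old = atomicrmw add ptr %x, i32 1 seq_cst
/// and the same pattern is reused for the sub/and/or/xor/nand variants.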
static Value *MakeBinaryAtomicValue(
    CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E,
    AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {}

static Value *EmitNontemporalStore(CodeGenFunction &CGF, const CallExpr *E) {}

static Value *EmitNontemporalLoad(CodeGenFunction &CGF, const CallExpr *E) {}

static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
                               llvm::AtomicRMWInst::BinOp Kind,
                               const CallExpr *E) {}

/// Utility to insert an atomic instruction based on Intrinsic::ID and
/// the expression node, where the return value is the result of the
/// operation.
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
                                   llvm::AtomicRMWInst::BinOp Kind,
                                   const CallExpr *E,
                                   Instruction::BinaryOps Op,
                                   bool Invert = false) {}

/// Utility to insert an atomic cmpxchg instruction.
///
/// @param CGF The current codegen function.
/// @param E   Builtin call expression to convert to cmpxchg.
///            arg0 - address to operate on
///            arg1 - value to compare with
///            arg2 - new value
/// @param ReturnBool Specifies whether to return success flag of
///                   cmpxchg result or the old value.
///
/// @returns result of cmpxchg, according to ReturnBool
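///
/// For example, __sync_val_compare_and_swap(p, old, new) returns the loaded
/// old value (extractvalue index 0 of the cmpxchg result pair), while
/// __sync_bool_compare_and_swap(p, old, new) returns the i1 success flag
/// (index 1).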
///
/// Note: to lower Microsoft's _InterlockedCompareExchange* intrinsics, invoke
/// EmitAtomicCmpXchgForMSIntrin instead.
static Value *MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E,
                                     bool ReturnBool) {}

/// This function should be invoked to emit atomic cmpxchg for Microsoft's
/// _InterlockedCompareExchange* intrinsics which have the following signature:
/// T _InterlockedCompareExchange(T volatile *Destination,
///                               T Exchange,
///                               T Comparand);
///
/// Whereas the llvm 'cmpxchg' instruction has the following syntax:
/// cmpxchg *Destination, Comparand, Exchange.
/// So we need to swap Comparand and Exchange when invoking
/// CreateAtomicCmpXchg. That is also why we cannot reuse the utility function
/// MakeAtomicCmpXchgValue above: it expects its arguments to already be in
/// cmpxchg order.
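///
/// For example, with T = long,
///   _InterlockedCompareExchange(&Dest, Exch, Comp)
/// is emitted, after the swap, roughly as
///   %pair = cmpxchg volatile ptr %Dest, i32 %Comp, i32 %Exch seq_cst seq_cst
///   %old  = extractvalue { i32, i1 } %pair, 0   ; returned to the caller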

static
Value *EmitAtomicCmpXchgForMSIntrin(CodeGenFunction &CGF, const CallExpr *E,
    AtomicOrdering SuccessOrdering = AtomicOrdering::SequentiallyConsistent) {}

// 64-bit Microsoft platforms support 128-bit cmpxchg operations. They are
// prototyped like this:
//
// unsigned char _InterlockedCompareExchange128...(
//     __int64 volatile * _Destination,
//     __int64 _ExchangeHigh,
//     __int64 _ExchangeLow,
//     __int64 * _ComparandResult);
//
// Note that Destination is assumed to be at least 16-byte aligned, despite
// being typed as __int64.
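//
// A sketch of the expected lowering (orderings vary with the builtin suffix):
// assemble the two __int64 halves into an i128 exchange value, load the i128
// comparand from *_ComparandResult, then roughly
//   %pair = cmpxchg volatile ptr %_Destination, i128 %comparand,
//                   i128 %exchange seq_cst seq_cst
// store the old value back to *_ComparandResult and return the success bit
// zero-extended to unsigned char.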

static Value *EmitAtomicCmpXchg128ForMSIntrin(CodeGenFunction &CGF,
                                              const CallExpr *E,
                                              AtomicOrdering SuccessOrdering) {}

static Value *EmitAtomicIncrementValue(CodeGenFunction &CGF, const CallExpr *E,
    AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {}

static Value *EmitAtomicDecrementValue(
    CodeGenFunction &CGF, const CallExpr *E,
    AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {}

// Build a plain volatile load.
static Value *EmitISOVolatileLoad(CodeGenFunction &CGF, const CallExpr *E) {}

// Build a plain volatile store.
static Value *EmitISOVolatileStore(CodeGenFunction &CGF, const CallExpr *E) {}

// Emit a simple mangled intrinsic that has 1 argument and a return type
// matching the argument type. Depending on mode, this may be a constrained
// floating-point intrinsic.
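//
// For example, __builtin_sqrt(x) is normally emitted as
//   %r = call double @llvm.sqrt.f64(double %x)
// but in a strict floating-point environment it becomes roughly
//   %r = call double @llvm.experimental.constrained.sqrt.f64(double %x,
//            metadata !"round.dynamic", metadata !"fpexcept.strict")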
static Value *emitUnaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
                                const CallExpr *E, unsigned IntrinsicID,
                                unsigned ConstrainedIntrinsicID) {}

// Emit an intrinsic that has 2 operands of the same type as its result.
// Depending on mode, this may be a constrained floating-point intrinsic.
static Value *emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
                                const CallExpr *E, unsigned IntrinsicID,
                                unsigned ConstrainedIntrinsicID) {}

// Emit a binary intrinsic whose second argument's type also participates in
// the intrinsic's mangled name (e.g. llvm.ldexp.f64.i32). Depending on mode,
// this may be a constrained floating-point intrinsic.
static Value *emitBinaryExpMaybeConstrainedFPBuiltin(
    CodeGenFunction &CGF, const CallExpr *E, llvm::Intrinsic::ID IntrinsicID,
    llvm::Intrinsic::ID ConstrainedIntrinsicID) {}

// Emit an intrinsic that has 3 operands of the same type as its result.
// Depending on mode, this may be a constrained floating-point intrinsic.
static Value *emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
                                 const CallExpr *E, unsigned IntrinsicID,
                                 unsigned ConstrainedIntrinsicID) {}

// Emit an intrinsic where all operands are of the same type as the result.
// Depending on mode, this may be a constrained floating-point intrinsic.
static Value *emitCallMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
                                                unsigned IntrinsicID,
                                                unsigned ConstrainedIntrinsicID,
                                                llvm::Type *Ty,
                                                ArrayRef<Value *> Args) {}

// Emit a simple intrinsic that has N scalar arguments and a return type
// matching the argument type. It is assumed that only the first argument is
// overloaded.
template <unsigned N>
static Value *emitBuiltinWithOneOverloadedType(CodeGenFunction &CGF,
                                               const CallExpr *E,
                                               unsigned IntrinsicID,
                                               llvm::StringRef Name = "") {}

// Emit an intrinsic that has 1 float or double operand, and 1 integer.
static Value *emitFPIntBuiltin(CodeGenFunction &CGF,
                               const CallExpr *E,
                               unsigned IntrinsicID) {}

// Emit an intrinsic that has an overloaded integer result and an fp operand.
static Value *
emitMaybeConstrainedFPToIntRoundBuiltin(CodeGenFunction &CGF, const CallExpr *E,
                                        unsigned IntrinsicID,
                                        unsigned ConstrainedIntrinsicID) {}

static Value *emitFrexpBuiltin(CodeGenFunction &CGF, const CallExpr *E,
                               llvm::Intrinsic::ID IntrinsicID) {}

/// EmitFAbs - Emit a call to @llvm.fabs().
static Value *EmitFAbs(CodeGenFunction &CGF, Value *V) {}

/// Emit the computation of the sign bit for a floating point value. Returns
/// the i1 sign bit value.
static Value *EmitSignBit(CodeGenFunction &CGF, Value *V) {}

/// Checks that no arguments or results are passed indirectly in the ABI (i.e.
/// via a hidden pointer). This is used to check that annotating FP libcalls
/// (which could set `errno`) with "int" TBAA metadata is safe. If any
/// floating-point arguments are passed indirectly, setup for the call could be
/// incorrectly optimized out.
static bool HasNoIndirectArgumentsOrResults(CGFunctionInfo const &FnInfo) {}

static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *FD,
                              const CallExpr *E, llvm::Constant *calleeValue) {}

/// Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.*
/// depending on IntrinsicID.
///
/// \arg CGF The current codegen function.
/// \arg IntrinsicID The ID for the Intrinsic we wish to generate.
/// \arg X The first argument to the llvm.*.with.overflow.*.
/// \arg Y The second argument to the llvm.*.with.overflow.*.
/// \arg Carry The carry returned by the llvm.*.with.overflow.*.
/// \returns The result (i.e. sum/product) returned by the intrinsic.
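///
/// For example, __builtin_sadd_overflow(x, y, &sum) with int operands emits
///   %res   = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %x, i32 %y)
///   %sum   = extractvalue { i32, i1 } %res, 0   ; stored through the pointer
///   %carry = extractvalue { i32, i1 } %res, 1   ; the returned overflow flag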
static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF,
                                          const llvm::Intrinsic::ID IntrinsicID,
                                          llvm::Value *X, llvm::Value *Y,
                                          llvm::Value *&Carry) {}

static Value *emitRangedBuiltin(CodeGenFunction &CGF, unsigned IntrinsicID,
                                int low, int high) {}

namespace {
  struct WidthAndSignedness {};
}

static WidthAndSignedness
getIntegerWidthAndSignedness(const clang::ASTContext &context,
                             const clang::QualType Type) {}

// Given one or more integer types, this function produces an integer type that
// encompasses them: any value in one of the given types could be expressed in
// the encompassing type.
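//
// For example, the encompassing type for {int32_t, uint32_t} is a signed
// 64-bit integer: it must be signed to cover int32_t's negative values, and
// once signed it needs more than 32 bits to also cover all of uint32_t.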
static struct WidthAndSignedness
EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) {}

Value *CodeGenFunction::EmitVAStartEnd(Value *ArgValue, bool IsStart) {}

/// Checks if using the result of __builtin_object_size(p, @p From) in place of
/// __builtin_object_size(p, @p To) is correct.
static bool areBOSTypesCompatible(int From, int To) {}

static llvm::Value *
getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType) {}

llvm::Value *
CodeGenFunction::evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
                                                 llvm::IntegerType *ResType,
                                                 llvm::Value *EmittedE,
                                                 bool IsDynamic) {}

const FieldDecl *CodeGenFunction::FindFlexibleArrayMemberFieldAndOffset(
    ASTContext &Ctx, const RecordDecl *RD, const FieldDecl *FAMDecl,
    uint64_t &Offset) {}

static unsigned CountCountedByAttrs(const RecordDecl *RD) {}

llvm::Value *
CodeGenFunction::emitFlexibleArrayMemberSize(const Expr *E, unsigned Type,
                                             llvm::IntegerType *ResType) {}

/// Returns a Value corresponding to the size of the given expression.
/// This Value may be either of the following:
///   - A llvm::Argument (if E is a param with the pass_object_size attribute on
///     it)
///   - A call to the @llvm.objectsize intrinsic
///
/// EmittedE is the result of emitting `E` as a scalar expr. If it's non-null
/// and we wouldn't otherwise try to reference a pass_object_size parameter,
/// we'll call @llvm.objectsize on EmittedE, rather than emitting E.
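///
/// For example, absent a pass_object_size parameter, __builtin_object_size(p, 0)
/// is emitted roughly as
///   %size = call i64 @llvm.objectsize.i64.p0(ptr %p, i1 %min, i1 %nullunknown,
///                                            i1 %dynamic)
/// where the (constant) i1 flags encode min vs. max, whether null has unknown
/// size, and whether this is __builtin_dynamic_object_size.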
llvm::Value *
CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type,
                                       llvm::IntegerType *ResType,
                                       llvm::Value *EmittedE, bool IsDynamic) {}

namespace {
/// A struct to generically describe a bit test intrinsic.
struct BitTest {};

} // namespace

BitTest BitTest::decodeBitTestBuiltin(unsigned BuiltinID) {}

static char bitActionToX86BTCode(BitTest::ActionKind A) {}

static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF,
                                            BitTest BT,
                                            const CallExpr *E, Value *BitBase,
                                            Value *BitPos) {}

static llvm::AtomicOrdering
getBitTestAtomicOrdering(BitTest::InterlockingKind I) {}

/// Emit a _bittest* intrinsic. These intrinsics take a pointer to an array of
/// bits and a bit position and read and optionally modify the bit at that
/// position. The position index can be arbitrarily large, i.e. it can be larger
/// than 31 or 63, so we need an indexed load in the general case.
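///
/// For example, the generic (non-x86) expansion of _bittest(Base, Pos) loads
/// the addressed byte and extracts the bit, roughly:
///   %byteidx  = ashr i32 %Pos, 3
///   %byteaddr = getelementptr i8, ptr %Base, i32 %byteidx
///   %byte     = load i8, ptr %byteaddr
/// followed by shifting %byte right by (%Pos & 7) and masking with 1.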
static llvm::Value *EmitBitTestIntrinsic(CodeGenFunction &CGF,
                                         unsigned BuiltinID,
                                         const CallExpr *E) {}

static llvm::Value *emitPPCLoadReserveIntrinsic(CodeGenFunction &CGF,
                                                unsigned BuiltinID,
                                                const CallExpr *E) {}

namespace {
enum class MSVCSetJmpKind {};
}

/// MSVC handles setjmp a bit differently on different platforms. On every
/// architecture except 32-bit x86, the frame address is passed. On x86, extra
/// parameters can be passed as variadic arguments, but we always pass none.
static RValue EmitMSVCRTSetJmp(CodeGenFunction &CGF, MSVCSetJmpKind SJKind,
                               const CallExpr *E) {}

// Many of MSVC's builtins are available on x64, ARM, and AArch64; to avoid
// repeating code, we handle them here.
enum class CodeGenFunction::MSVCIntrin {};

static std::optional<CodeGenFunction::MSVCIntrin>
translateArmToMsvcIntrin(unsigned BuiltinID) {}

static std::optional<CodeGenFunction::MSVCIntrin>
translateAarch64ToMsvcIntrin(unsigned BuiltinID) {}

static std::optional<CodeGenFunction::MSVCIntrin>
translateX86ToMsvcIntrin(unsigned BuiltinID) {}

// Emit an MSVC intrinsic. Assumes that arguments have *not* been evaluated.
Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID,
                                            const CallExpr *E) {}

namespace {
// ARC cleanup for __builtin_os_log_format
struct CallObjCArcUse final : EHScopeStack::Cleanup {};
}

Value *CodeGenFunction::EmitCheckedArgForBuiltin(const Expr *E,
                                                 BuiltinCheckKind Kind) {}

Value *CodeGenFunction::EmitCheckedArgForAssume(const Expr *E) {}

static Value *EmitAbs(CodeGenFunction &CGF, Value *ArgValue, bool HasNSW) {}

static Value *EmitOverflowCheckedAbs(CodeGenFunction &CGF, const CallExpr *E,
                                     bool SanitizeOverflow) {}

/// Get the argument type for arguments to os_log_helper.
static CanQualType getOSLogArgType(ASTContext &C, int Size) {}

llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction(
    const analyze_os_log::OSLogBufferLayout &Layout,
    CharUnits BufferAlignment) {}

RValue CodeGenFunction::emitBuiltinOSLogFormat(const CallExpr &E) {}

static bool isSpecialUnsignedMultiplySignedResult(
    unsigned BuiltinID, WidthAndSignedness Op1Info, WidthAndSignedness Op2Info,
    WidthAndSignedness ResultInfo) {}

static RValue EmitCheckedUnsignedMultiplySignedResult(
    CodeGenFunction &CGF, const clang::Expr *Op1, WidthAndSignedness Op1Info,
    const clang::Expr *Op2, WidthAndSignedness Op2Info,
    const clang::Expr *ResultArg, QualType ResultQTy,
    WidthAndSignedness ResultInfo) {}

/// Determine if a binop is a checked mixed-sign multiply we can specialize.
static bool isSpecialMixedSignMultiply(unsigned BuiltinID,
                                       WidthAndSignedness Op1Info,
                                       WidthAndSignedness Op2Info,
                                       WidthAndSignedness ResultInfo) {}

/// Emit a checked mixed-sign multiply. This is a cheaper specialization of
/// the generic checked-binop irgen.
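///
/// A sketch of the idea for int64_t * uint64_t: take the absolute value of the
/// signed operand, multiply the unsigned magnitudes with
/// @llvm.umul.with.overflow.i64, negate the product when the signed operand
/// was negative, and fold the "result does not fit the destination's
/// signedness" check into the returned overflow flag.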
static RValue
EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1,
                             WidthAndSignedness Op1Info, const clang::Expr *Op2,
                             WidthAndSignedness Op2Info,
                             const clang::Expr *ResultArg, QualType ResultQTy,
                             WidthAndSignedness ResultInfo) {}

static bool
TypeRequiresBuiltinLaunderImp(const ASTContext &Ctx, QualType Ty,
                              llvm::SmallPtrSetImpl<const Decl *> &Seen) {}

/// Determine if the specified type requires laundering by checking if it is a
/// dynamic class type or contains a subobject which is a dynamic class type.
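///
/// For example:
///   struct A { virtual void f(); };  // dynamic class type: needs laundering
///   struct B { A a; };               // dynamic subobject:  needs laundering
///   struct C { int n; };             // plain data: __builtin_launder is a no-op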
static bool TypeRequiresBuiltinLaunder(CodeGenModule &CGM, QualType Ty) {}

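// For example, __builtin_rotateleft32(x, n) is emitted as the funnel shift
//   %r = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 %n)
// where passing the same value as both data operands makes fshl a rotate.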
RValue CodeGenFunction::emitRotate(const CallExpr *E, bool IsRotateRight) {}

// Map math builtins for long-double to their f128 versions.
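// For example, when long double is the IEEE 128-bit type,
// BI__builtin_sinl is remapped to BI__builtin_sinf128 so that the f128
// libcall variant is emitted.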
static unsigned mutateLongDoubleBuiltin(unsigned BuiltinID) {}

static Value *tryUseTestFPKind(CodeGenFunction &CGF, unsigned BuiltinID,
                               Value *V) {}

static RValue EmitHipStdParUnsupportedBuiltin(CodeGenFunction *CGF,
                                              const FunctionDecl *FD) {}

RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
                                        const CallExpr *E,
                                        ReturnValueSlot ReturnValue) {}

static Value *EmitTargetArchBuiltinExpr(CodeGenFunction *CGF,
                                        unsigned BuiltinID, const CallExpr *E,
                                        ReturnValueSlot ReturnValue,
                                        llvm::Triple::ArchType Arch) {}

Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
                                              const CallExpr *E,
                                              ReturnValueSlot ReturnValue) {}

static llvm::FixedVectorType *GetNeonType(CodeGenFunction *CGF,
                                          NeonTypeFlags TypeFlags,
                                          bool HasLegalHalfType = true,
                                          bool V1Ty = false,
                                          bool AllowBFloatArgsAndRet = true) {}

static llvm::VectorType *GetFloatNeonType(CodeGenFunction *CGF,
                                          NeonTypeFlags IntTypeFlags) {}

Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C,
                                      const ElementCount &Count) {}

Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) {}

Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
                                     const char *name,
                                     unsigned shift, bool rightshift) {}

Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty,
                                            bool neg) {}

// Right-shift a vector by a constant.
Value *CodeGenFunction::EmitNeonRShiftImm(Value *Vec, Value *Shift,
                                          llvm::Type *Ty, bool usgn,
                                          const char *name) {}

enum {};

namespace {
struct ARMVectorIntrinsicInfo {};
} // end anonymous namespace

#define NEONMAP0

#define NEONMAP1

#define NEONMAP2

static const ARMVectorIntrinsicInfo ARMSIMDIntrinsicMap[] =;

static const ARMVectorIntrinsicInfo AArch64SIMDIntrinsicMap[] =;

static const ARMVectorIntrinsicInfo AArch64SISDIntrinsicMap[] =;

// Some intrinsics are equivalent for codegen.
static const std::pair<unsigned, unsigned> NEONEquivalentIntrinsicMap[] =;

#undef NEONMAP0
#undef NEONMAP1
#undef NEONMAP2

#define SVEMAP1

#define SVEMAP2
static const ARMVectorIntrinsicInfo AArch64SVEIntrinsicMap[] =;

#undef SVEMAP1
#undef SVEMAP2

#define SMEMAP1

#define SMEMAP2
static const ARMVectorIntrinsicInfo AArch64SMEIntrinsicMap[] =;

#undef SMEMAP1
#undef SMEMAP2

static bool NEONSIMDIntrinsicsProvenSorted =;

static bool AArch64SIMDIntrinsicsProvenSorted =;
static bool AArch64SISDIntrinsicsProvenSorted =;
static bool AArch64SVEIntrinsicsProvenSorted =;
static bool AArch64SMEIntrinsicsProvenSorted =;

static const ARMVectorIntrinsicInfo *
findARMVectorIntrinsicInMap(ArrayRef<ARMVectorIntrinsicInfo> IntrinsicMap,
                            unsigned BuiltinID, bool &MapProvenSorted) {}

Function *CodeGenFunction::LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
                                                   unsigned Modifier,
                                                   llvm::Type *ArgType,
                                                   const CallExpr *E) {}

static Value *EmitCommonNeonSISDBuiltinExpr(
    CodeGenFunction &CGF, const ARMVectorIntrinsicInfo &SISDInfo,
    SmallVectorImpl<Value *> &Ops, const CallExpr *E) {}

Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
    unsigned BuiltinID, unsigned LLVMIntrinsic, unsigned AltLLVMIntrinsic,
    const char *NameHint, unsigned Modifier, const CallExpr *E,
    SmallVectorImpl<llvm::Value *> &Ops, Address PtrOp0, Address PtrOp1,
    llvm::Triple::ArchType Arch) {}

Value *CodeGenFunction::EmitAArch64CompareBuiltinExpr(
    Value *Op, llvm::Type *Ty, const CmpInst::Predicate Fp,
    const CmpInst::Predicate Ip, const Twine &Name) {}

static Value *packTBLDVectorList(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
                                 Value *ExtOp, Value *IndexOp,
                                 llvm::Type *ResTy, unsigned IntID,
                                 const char *Name) {}

Value *CodeGenFunction::GetValueForARMHint(unsigned BuiltinID) {}

enum SpecialRegisterAccessKind {};

// Generates the IR for __builtin_read_exec_*.
// Lowers the builtin to amdgcn_ballot intrinsic.
static Value *EmitAMDGCNBallotForExec(CodeGenFunction &CGF, const CallExpr *E,
                                      llvm::Type *RegisterType,
                                      llvm::Type *ValueType, bool isExecHi) {}

// Generates the IR for the read/write special register builtin,
// ValueType is the type of the value that is to be written or read,
// RegisterType is the type of the register being written to or read from.
static Value *EmitSpecialRegisterBuiltin(CodeGenFunction &CGF,
                                         const CallExpr *E,
                                         llvm::Type *RegisterType,
                                         llvm::Type *ValueType,
                                         SpecialRegisterAccessKind AccessKind,
                                         StringRef SysReg = "") {}

/// Return true if BuiltinID is an overloaded Neon intrinsic with an extra
/// argument that specifies the vector type.
static bool HasExtraNeonArgument(unsigned BuiltinID) {}

Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
                                           const CallExpr *E,
                                           ReturnValueSlot ReturnValue,
                                           llvm::Triple::ArchType Arch) {}

template<typename Integer>
static Integer GetIntegerConstantValue(const Expr *E, ASTContext &Context) {}

static llvm::Value *SignOrZeroExtend(CGBuilderTy &Builder, llvm::Value *V,
                                     llvm::Type *T, bool Unsigned) {}

static llvm::Value *MVEImmediateShr(CGBuilderTy &Builder, llvm::Value *V,
                                    uint32_t Shift, bool Unsigned) {}

static llvm::Value *ARMMVEVectorSplat(CGBuilderTy &Builder, llvm::Value *V) {}

static llvm::Value *ARMMVEVectorReinterpret(CGBuilderTy &Builder,
                                            CodeGenFunction *CGF,
                                            llvm::Value *V,
                                            llvm::Type *DestType) {}

static llvm::Value *VectorUnzip(CGBuilderTy &Builder, llvm::Value *V, bool Odd) {}

static llvm::Value *VectorZip(CGBuilderTy &Builder, llvm::Value *V0,
                              llvm::Value *V1) {}

template<unsigned HighBit, unsigned OtherBits>
static llvm::Value *ARMMVEConstantSplat(CGBuilderTy &Builder, llvm::Type *VT) {}

static llvm::Value *ARMMVEVectorElementReverse(CGBuilderTy &Builder,
                                               llvm::Value *V,
                                               unsigned ReverseWidth) {}

Value *CodeGenFunction::EmitARMMVEBuiltinExpr(unsigned BuiltinID,
                                              const CallExpr *E,
                                              ReturnValueSlot ReturnValue,
                                              llvm::Triple::ArchType Arch) {}

Value *CodeGenFunction::EmitARMCDEBuiltinExpr(unsigned BuiltinID,
                                              const CallExpr *E,
                                              ReturnValueSlot ReturnValue,
                                              llvm::Triple::ArchType Arch) {}

static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID,
                                      const CallExpr *E,
                                      SmallVectorImpl<Value *> &Ops,
                                      llvm::Triple::ArchType Arch) {}

Value *CodeGenFunction::vectorWrapScalar16(Value *Op) {}

/// SVEBuiltinMemEltTy - Returns the memory element type for this memory
/// access builtin.  Only required if it can't be inferred from the base pointer
/// operand.
llvm::Type *CodeGenFunction::SVEBuiltinMemEltTy(const SVETypeFlags &TypeFlags) {}

llvm::Type *CodeGenFunction::getEltType(const SVETypeFlags &TypeFlags) {}

// Return the llvm predicate vector type corresponding to the specified element
// TypeFlags.
llvm::ScalableVectorType *
CodeGenFunction::getSVEPredType(const SVETypeFlags &TypeFlags) {}

// Return the llvm vector type corresponding to the specified element TypeFlags.
llvm::ScalableVectorType *
CodeGenFunction::getSVEType(const SVETypeFlags &TypeFlags) {}

llvm::Value *
CodeGenFunction::EmitSVEAllTruePred(const SVETypeFlags &TypeFlags) {}

constexpr unsigned SVEBitsPerBlock =;

static llvm::ScalableVectorType *getSVEVectorForElementType(llvm::Type *EltTy) {}

// Reinterpret the input predicate so that it can be used to correctly isolate
// the elements of the specified datatype.
Value *CodeGenFunction::EmitSVEPredicateCast(Value *Pred,
                                             llvm::ScalableVectorType *VTy) {}

Value *CodeGenFunction::EmitSVEPredicateTupleCast(Value *PredTuple,
                                                  llvm::StructType *Ty) {}

Value *CodeGenFunction::EmitSVEGatherLoad(const SVETypeFlags &TypeFlags,
                                          SmallVectorImpl<Value *> &Ops,
                                          unsigned IntID) {}

Value *CodeGenFunction::EmitSVEScatterStore(const SVETypeFlags &TypeFlags,
                                            SmallVectorImpl<Value *> &Ops,
                                            unsigned IntID) {}

Value *CodeGenFunction::EmitSVEGatherPrefetch(const SVETypeFlags &TypeFlags,
                                              SmallVectorImpl<Value *> &Ops,
                                              unsigned IntID) {}

Value *CodeGenFunction::EmitSVEStructLoad(const SVETypeFlags &TypeFlags,
                                          SmallVectorImpl<Value*> &Ops,
                                          unsigned IntID) {}

Value *CodeGenFunction::EmitSVEStructStore(const SVETypeFlags &TypeFlags,
                                           SmallVectorImpl<Value*> &Ops,
                                           unsigned IntID) {}

// SVE2's svpmullb and svpmullt builtins are similar to the svpmullb_pair and
// svpmullt_pair intrinsics, with the exception that their results are bitcast
// to a wider type.
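//
// For example (a sketch): svpmullb_u64 on two svuint32_t operands is emitted
// via @llvm.aarch64.sve.pmullb.pair.nxv4i32, and its <vscale x 4 x i32>
// result is then bitcast to <vscale x 2 x i64>.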
Value *CodeGenFunction::EmitSVEPMull(const SVETypeFlags &TypeFlags,
                                     SmallVectorImpl<Value *> &Ops,
                                     unsigned BuiltinID) {}

Value *CodeGenFunction::EmitSVEMovl(const SVETypeFlags &TypeFlags,
                                    ArrayRef<Value *> Ops, unsigned BuiltinID) {}

Value *CodeGenFunction::EmitSVEPrefetchLoad(const SVETypeFlags &TypeFlags,
                                            SmallVectorImpl<Value *> &Ops,
                                            unsigned BuiltinID) {}

Value *CodeGenFunction::EmitSVEMaskedLoad(const CallExpr *E,
                                          llvm::Type *ReturnTy,
                                          SmallVectorImpl<Value *> &Ops,
                                          unsigned IntrinsicID,
                                          bool IsZExtReturn) {}

Value *CodeGenFunction::EmitSVEMaskedStore(const CallExpr *E,
                                           SmallVectorImpl<Value *> &Ops,
                                           unsigned IntrinsicID) {}

Value *CodeGenFunction::EmitSMELd1St1(const SVETypeFlags &TypeFlags,
                                      SmallVectorImpl<Value *> &Ops,
                                      unsigned IntID) {}

Value *CodeGenFunction::EmitSMEReadWrite(const SVETypeFlags &TypeFlags,
                                         SmallVectorImpl<Value *> &Ops,
                                         unsigned IntID) {}

Value *CodeGenFunction::EmitSMEZero(const SVETypeFlags &TypeFlags,
                                    SmallVectorImpl<Value *> &Ops,
                                    unsigned IntID) {}

Value *CodeGenFunction::EmitSMELdrStr(const SVETypeFlags &TypeFlags,
                                      SmallVectorImpl<Value *> &Ops,
                                      unsigned IntID) {}

// Limit the usage of scalable llvm IR generated by the ACLE by using the
// sve dup.x intrinsic instead of IRBuilder::CreateVectorSplat.
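//
// For example, splatting an i32 scalar across <vscale x 4 x i32> is emitted as
//   %splat = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 %s)
// rather than the generic insertelement + shufflevector splat idiom.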
Value *CodeGenFunction::EmitSVEDupX(Value *Scalar, llvm::Type *Ty) {}

Value *CodeGenFunction::EmitSVEDupX(Value* Scalar) {}

Value *CodeGenFunction::EmitSVEReinterpret(Value *Val, llvm::Type *Ty) {}

static void InsertExplicitZeroOperand(CGBuilderTy &Builder, llvm::Type *Ty,
                                      SmallVectorImpl<Value *> &Ops) {}

static void InsertExplicitUndefOperand(CGBuilderTy &Builder, llvm::Type *Ty,
                                       SmallVectorImpl<Value *> &Ops) {}

SmallVector<llvm::Type *, 2>
CodeGenFunction::getSVEOverloadTypes(const SVETypeFlags &TypeFlags,
                                     llvm::Type *ResultType,
                                     ArrayRef<Value *> Ops) {}

Value *CodeGenFunction::EmitSVETupleSetOrGet(const SVETypeFlags &TypeFlags,
                                             ArrayRef<Value *> Ops) {}

Value *CodeGenFunction::EmitSVETupleCreate(const SVETypeFlags &TypeFlags,
                                           llvm::Type *Ty,
                                           ArrayRef<Value *> Ops) {}

void CodeGenFunction::GetAArch64SVEProcessedOperands(
    unsigned BuiltinID, const CallExpr *E, SmallVectorImpl<Value *> &Ops,
    SVETypeFlags TypeFlags) {}

Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID,
                                                  const CallExpr *E) {}

static void swapCommutativeSMEOperands(unsigned BuiltinID,
                                       SmallVectorImpl<Value *> &Ops) {}

Value *CodeGenFunction::EmitAArch64SMEBuiltinExpr(unsigned BuiltinID,
                                                  const CallExpr *E) {}

Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
                                               const CallExpr *E,
                                               llvm::Triple::ArchType Arch) {}

Value *CodeGenFunction::EmitBPFBuiltinExpr(unsigned BuiltinID,
                                           const CallExpr *E) {}

llvm::Value *CodeGenFunction::
BuildVector(ArrayRef<llvm::Value*> Ops) {}

static Value *getMaskVecValue(CodeGenFunction &CGF, Value *Mask,
                              unsigned NumElts) {}

static Value *EmitX86MaskedStore(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
                                 Align Alignment) {}

static Value *EmitX86MaskedLoad(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
                                Align Alignment) {}

static Value *EmitX86ExpandLoad(CodeGenFunction &CGF,
                                ArrayRef<Value *> Ops) {}

static Value *EmitX86CompressExpand(CodeGenFunction &CGF,
                                    ArrayRef<Value *> Ops,
                                    bool IsCompress) {}

static Value *EmitX86CompressStore(CodeGenFunction &CGF,
                                   ArrayRef<Value *> Ops) {}

static Value *EmitX86MaskLogic(CodeGenFunction &CGF, Instruction::BinaryOps Opc,
                               ArrayRef<Value *> Ops,
                               bool InvertLHS = false) {}

static Value *EmitX86FunnelShift(CodeGenFunction &CGF, Value *Op0, Value *Op1,
                                 Value *Amt, bool IsRight) {}

static Value *EmitX86vpcom(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
                           bool IsSigned) {}

static Value *EmitX86Select(CodeGenFunction &CGF,
                            Value *Mask, Value *Op0, Value *Op1) {}

static Value *EmitX86ScalarSelect(CodeGenFunction &CGF,
                                  Value *Mask, Value *Op0, Value *Op1) {}

static Value *EmitX86MaskedCompareResult(CodeGenFunction &CGF, Value *Cmp,
                                         unsigned NumElts, Value *MaskIn) {}

static Value *EmitX86MaskedCompare(CodeGenFunction &CGF, unsigned CC,
                                   bool Signed, ArrayRef<Value *> Ops) {}

static Value *EmitX86ConvertToMask(CodeGenFunction &CGF, Value *In) {}

static Value *EmitX86ConvertIntToFp(CodeGenFunction &CGF, const CallExpr *E,
                                    ArrayRef<Value *> Ops, bool IsSigned) {}

static Value *EmitX86FMAExpr(CodeGenFunction &CGF, const CallExpr *E,
                             ArrayRef<Value *> Ops, unsigned BuiltinID,
                             bool IsAddSub) {}

static Value *EmitScalarFMAExpr(CodeGenFunction &CGF, const CallExpr *E,
                                MutableArrayRef<Value *> Ops, Value *Upper,
                                bool ZeroMask = false, unsigned PTIdx = 0,
                                bool NegAcc = false) {}

static Value *EmitX86Muldq(CodeGenFunction &CGF, bool IsSigned,
                           ArrayRef<Value *> Ops) {}

static Value *EmitX86Ternlog(CodeGenFunction &CGF, bool ZeroMask,
                             ArrayRef<Value *> Ops) {}

static Value *EmitX86SExtMask(CodeGenFunction &CGF, Value *Op,
                              llvm::Type *DstTy) {}

Value *CodeGenFunction::EmitX86CpuIs(const CallExpr *E) {}

static Value *EmitX86CvtF16ToFloatExpr(CodeGenFunction &CGF,
                                       ArrayRef<Value *> Ops,
                                       llvm::Type *DstTy) {}

Value *CodeGenFunction::EmitX86CpuIs(StringRef CPUStr) {}

Value *CodeGenFunction::EmitX86CpuSupports(const CallExpr *E) {}

Value *CodeGenFunction::EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs) {}

llvm::Value *
CodeGenFunction::EmitX86CpuSupports(std::array<uint32_t, 4> FeatureMask) {}

Value *CodeGenFunction::EmitAArch64CpuInit() {}

Value *CodeGenFunction::EmitRISCVCpuInit() {}

Value *CodeGenFunction::EmitX86CpuInit() {}

Value *CodeGenFunction::EmitAArch64CpuSupports(const CallExpr *E) {}

llvm::Value *
CodeGenFunction::EmitAArch64CpuSupports(ArrayRef<StringRef> FeaturesStrs) {}

Value *CodeGenFunction::EmitRISCVCpuSupports(const CallExpr *E) {}

static Value *loadRISCVFeatureBits(unsigned Index, CGBuilderTy &Builder,
                                   CodeGenModule &CGM) {}

Value *CodeGenFunction::EmitRISCVCpuSupports(ArrayRef<StringRef> FeaturesStrs) {}

Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
                                           const CallExpr *E) {}

Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
                                           const CallExpr *E) {}

Value *EmitAMDGPUDispatchPtr(CodeGenFunction &CGF,
                             const CallExpr *E = nullptr) {}

Value *EmitAMDGPUImplicitArgPtr(CodeGenFunction &CGF) {}

Value *EmitAMDGPUWorkGroupSize(CodeGenFunction &CGF, unsigned Index) {}

Value *EmitAMDGPUGridSize(CodeGenFunction &CGF, unsigned Index) {}

void CodeGenFunction::ProcessOrderScopeAMDGCN(Value *Order, Value *Scope,
                                              llvm::AtomicOrdering &AO,
                                              llvm::SyncScope::ID &SSID) {}

llvm::Value *CodeGenFunction::EmitScalarOrConstFoldImmArg(unsigned ICEArguments,
                                                          unsigned Idx,
                                                          const CallExpr *E) {}

static Intrinsic::ID getDotProductIntrinsic(CGHLSLRuntime &RT, QualType QT) {}

Value *CodeGenFunction::EmitHLSLBuiltinExpr(unsigned BuiltinID,
                                            const CallExpr *E,
                                            ReturnValueSlot ReturnValue) {}

void CodeGenFunction::AddAMDGPUFenceAddressSpaceMMRA(llvm::Instruction *Inst,
                                                     const CallExpr *E) {}

Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
                                              const CallExpr *E) {}

static Value *EmitSystemZIntrinsicWithCC(CodeGenFunction &CGF,
                                         unsigned IntrinsicID,
                                         const CallExpr *E) {}

Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
                                               const CallExpr *E) {}

struct NVPTXMmaLdstInfo {};

#define MMA_INTR

#define MMA_LDST

static NVPTXMmaLdstInfo getNVPTXMmaLdstInfo(unsigned BuiltinID) {}

#undef MMA_LDST
#undef MMA_INTR

struct NVPTXMmaInfo {};

static NVPTXMmaInfo getNVPTXMmaInfo(unsigned BuiltinID) {}

static Value *MakeLdgLdu(unsigned IntrinsicID, CodeGenFunction &CGF,
                         const CallExpr *E) {}

static Value *MakeScopedAtomic(unsigned IntrinsicID, CodeGenFunction &CGF,
                               const CallExpr *E) {}

static Value *MakeCpAsync(unsigned IntrinsicID, unsigned IntrinsicIDS,
                          CodeGenFunction &CGF, const CallExpr *E,
                          int SrcSize) {}

static Value *MakeHalfType(unsigned IntrinsicID, unsigned BuiltinID,
                           const CallExpr *E, CodeGenFunction &CGF) {}

Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
                                             const CallExpr *E) {}

struct BuiltinAlignArgs {};

RValue CodeGenFunction::EmitBuiltinIsAligned(const CallExpr *E) {}

RValue CodeGenFunction::EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp) {}

Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
                                                   const CallExpr *E) {}

static std::pair<Intrinsic::ID, unsigned>
getIntrinsicForHexagonNonClangBuiltin(unsigned BuiltinID) {}

Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
                                               const CallExpr *E) {}

Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
                                             const CallExpr *E,
                                             ReturnValueSlot ReturnValue) {}