llvm/lib/IR/AutoUpgrade.cpp

//===-- AutoUpgrade.cpp - Implement auto-upgrade helper functions ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the auto-upgrade helper functions.
// This is where deprecated IR intrinsics and other IR features are updated to
// current specifications.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/AutoUpgrade.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/IR/AttributeMask.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsNVPTX.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Regex.h"
#include "llvm/TargetParser/Triple.h"
#include <cstring>

using namespace llvm;

static cl::opt<bool>
    DisableAutoUpgradeDebugInfo("disable-auto-upgrade-debug-info",
                                cl::desc("Disable autoupgrade of debug info"));

static void rename(GlobalValue *GV) {}

// Upgrade the declarations of the SSE4.1 ptest intrinsics whose arguments have
// changed their type from v4f32 to v2i64.
static bool upgradePTESTIntrinsic(Function *F, Intrinsic::ID IID,
                                  Function *&NewFn) {}
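
// Illustrative sketch (hypothetical helper, not the in-tree implementation):
// the usual declaration-upgrade pattern checks whether the function still has
// the old parameter type, moves the stale declaration aside with rename(), and
// then materializes the new declaration via Intrinsic::getDeclaration.
static bool sketchUpgradePTESTDeclaration(Function *F, Intrinsic::ID IID,
                                          Function *&NewFn) {
  // Old versions of the ptest intrinsics took <4 x float>; the current ones
  // take <2 x i64>.
  Type *Arg0Ty = F->getFunctionType()->getParamType(0);
  if (Arg0Ty != FixedVectorType::get(Type::getFloatTy(F->getContext()), 4))
    return false;

  // It is the old form: rename it out of the way and declare the new one.
  rename(F);
  NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
  return true;
}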

// Upgrade the declarations of intrinsic functions whose 8-bit immediate mask
// arguments have changed their type from i32 to i8.
static bool upgradeX86IntrinsicsWith8BitMask(Function *F, Intrinsic::ID IID,
                                             Function *&NewFn) {}

// Upgrade the declaration of fp compare intrinsics that change return type
// from scalar to vXi1 mask.
static bool upgradeX86MaskedFPCompare(Function *F, Intrinsic::ID IID,
                                      Function *&NewFn) {}

static bool upgradeX86BF16Intrinsic(Function *F, Intrinsic::ID IID,
                                    Function *&NewFn) {}

static bool upgradeX86BF16DPIntrinsic(Function *F, Intrinsic::ID IID,
                                      Function *&NewFn) {}

static bool shouldUpgradeX86Intrinsic(Function *F, StringRef Name) {}

static bool upgradeX86IntrinsicFunction(Function *F, StringRef Name,
                                        Function *&NewFn) {}

// Upgrade ARM (IsArm) or AArch64 (!IsArm) intrinsic functions. Returns true
// iff an upgrade was performed. IsArm: 'arm.*', !IsArm: 'aarch64.*'.
static bool upgradeArmOrAarch64IntrinsicFunction(bool IsArm, Function *F,
                                                 StringRef Name,
                                                 Function *&NewFn) {}

static Intrinsic::ID shouldUpgradeNVPTXBF16Intrinsic(StringRef Name) {}

static bool upgradeIntrinsicFunction1(Function *F, Function *&NewFn,
                                      bool CanUpgradeDebugIntrinsicsToRecords) {}

bool llvm::UpgradeIntrinsicFunction(Function *F, Function *&NewFn,
                                    bool CanUpgradeDebugIntrinsicsToRecords) {}

GlobalVariable *llvm::UpgradeGlobalVariable(GlobalVariable *GV) {}

// Handles upgrading SSE2/AVX2/AVX512BW PSLLDQ intrinsics by converting them
// to byte shuffles.
static Value *upgradeX86PSLLDQIntrinsics(IRBuilder<> &Builder, Value *Op,
                                         unsigned Shift) {}
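
// Illustrative sketch (hypothetical helper, assuming <N x i64> operands as the
// PSLLDQ intrinsics have): a left byte-shift within each 128-bit lane can be
// expressed by bitcasting to bytes and shuffling bytes in from a zero vector.
// The in-tree helper may differ in details.
static Value *sketchPSLLDQAsByteShuffle(IRBuilder<> &Builder, Value *Op,
                                        unsigned Shift) {
  auto *ResultTy = cast<FixedVectorType>(Op->getType());
  unsigned NumBytes = ResultTy->getNumElements() * 8; // i64 elements -> bytes.

  // Reinterpret the i64 vector as a byte vector.
  Type *ByteVecTy = FixedVectorType::get(Builder.getInt8Ty(), NumBytes);
  Op = Builder.CreateBitCast(Op, ByteVecTy);

  // Shifting by 16 or more bytes yields all zeroes.
  Value *Res = Constant::getNullValue(ByteVecTy);
  if (Shift < 16) {
    int Idxs[64];
    // 256/512-bit forms are treated as 2/4 independent 16-byte lanes.
    for (unsigned L = 0; L != NumBytes; L += 16)
      for (unsigned I = 0; I != 16; ++I) {
        unsigned Idx = NumBytes + I - Shift;
        if (Idx < NumBytes)
          Idx -= NumBytes - 16; // Bytes shifted in come from the zero vector.
        Idxs[L + I] = Idx + L;
      }
    Res = Builder.CreateShuffleVector(Res, Op, ArrayRef<int>(Idxs, NumBytes));
  }

  // Reinterpret back as the original element type.
  return Builder.CreateBitCast(Res, ResultTy);
}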

// Handles upgrading SSE2/AVX2/AVX512BW PSRLDQ intrinsics by converting them
// to byte shuffles.
static Value *upgradeX86PSRLDQIntrinsics(IRBuilder<> &Builder, Value *Op,
                                         unsigned Shift) {}

static Value *getX86MaskVec(IRBuilder<> &Builder, Value *Mask,
                            unsigned NumElts) {}

static Value *emitX86Select(IRBuilder<> &Builder, Value *Mask, Value *Op0,
                            Value *Op1) {}
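
// Illustrative sketch (hypothetical helper): the masked-intrinsic upgrades all
// funnel through the same idea; turn the integer mask into an <N x i1> vector
// (getX86MaskVec above) and emit a plain IR select, short-circuiting the
// common all-ones mask. The in-tree emitX86Select may differ in details.
static Value *sketchEmitX86Select(IRBuilder<> &Builder, Value *Mask,
                                  Value *Op0, Value *Op1) {
  // An all-ones constant mask selects Op0 unconditionally.
  if (const auto *C = dyn_cast<Constant>(Mask))
    if (C->isAllOnesValue())
      return Op0;

  unsigned NumElts = cast<FixedVectorType>(Op0->getType())->getNumElements();
  Value *MaskVec = getX86MaskVec(Builder, Mask, NumElts);
  return Builder.CreateSelect(MaskVec, Op0, Op1);
}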

static Value *emitX86ScalarSelect(IRBuilder<> &Builder, Value *Mask, Value *Op0,
                                  Value *Op1) {}

// Handle autoupgrade for masked PALIGNR and VALIGND/Q intrinsics.
// PALIGNR handles large immediates by shifting, while VALIGN masks the
// immediate, so we need to handle both cases. VALIGN also doesn't have
// 128-bit lanes.
static Value *upgradeX86ALIGNIntrinsics(IRBuilder<> &Builder, Value *Op0,
                                        Value *Op1, Value *Shift,
                                        Value *Passthru, Value *Mask,
                                        bool IsVALIGN) {}

static Value *upgradeX86VPERMT2Intrinsics(IRBuilder<> &Builder, CallBase &CI,
                                          bool ZeroMask, bool IndexForm) {}

static Value *upgradeX86BinaryIntrinsics(IRBuilder<> &Builder, CallBase &CI,
                                         Intrinsic::ID IID) {}

static Value *upgradeX86Rotate(IRBuilder<> &Builder, CallBase &CI,
                               bool IsRotateRight) {}
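
// Illustrative sketch (hypothetical helper, assuming the rotate amount is
// already a vector of the same type as the source): funnel shifts subsume
// rotates when both value operands are the same, so an old rotate call can be
// lowered to llvm.fshl/llvm.fshr, optionally followed by a masked select. The
// in-tree version also handles scalar immediate amounts.
static Value *sketchRotateViaFunnelShift(IRBuilder<> &Builder, CallBase &CI,
                                         bool IsRotateRight) {
  Value *Src = CI.getArgOperand(0);
  Value *Amt = CI.getArgOperand(1);

  // rot(x, n) == fsh(x, x, n).
  Intrinsic::ID IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl;
  Value *Res = Builder.CreateIntrinsic(IID, {Src->getType()}, {Src, Src, Amt});

  // Masked forms carry a passthru and a mask as trailing operands.
  if (CI.arg_size() == 4)
    Res = emitX86Select(Builder, CI.getArgOperand(3), Res, CI.getArgOperand(2));
  return Res;
}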

static Value *upgradeX86vpcom(IRBuilder<> &Builder, CallBase &CI, unsigned Imm,
                              bool IsSigned) {}

static Value *upgradeX86ConcatShift(IRBuilder<> &Builder, CallBase &CI,
                                    bool IsShiftRight, bool ZeroMask) {}

static Value *upgradeMaskedStore(IRBuilder<> &Builder, Value *Ptr, Value *Data,
                                 Value *Mask, bool Aligned) {}
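
// Illustrative sketch (hypothetical helper): an old masked-store intrinsic can
// be rewritten as a plain store when the mask is known all-ones, and as the
// target-independent llvm.masked.store otherwise, with the alignment derived
// from the vector width for the "aligned" flavours. The actual in-tree helper
// may differ.
static Value *sketchUpgradeMaskedStore(IRBuilder<> &Builder, Value *Ptr,
                                       Value *Data, Value *Mask, bool Aligned) {
  const Align Alignment =
      Aligned
          ? Align(Data->getType()->getPrimitiveSizeInBits().getFixedValue() / 8)
          : Align(1);

  // All-ones mask: a regular store is enough.
  if (const auto *C = dyn_cast<Constant>(Mask))
    if (C->isAllOnesValue())
      return Builder.CreateAlignedStore(Data, Ptr, Alignment);

  // Otherwise widen the integer mask to <N x i1> and emit a masked store.
  unsigned NumElts = cast<FixedVectorType>(Data->getType())->getNumElements();
  Value *MaskVec = getX86MaskVec(Builder, Mask, NumElts);
  return Builder.CreateMaskedStore(Data, Ptr, Alignment, MaskVec);
}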

static Value *upgradeMaskedLoad(IRBuilder<> &Builder, Value *Ptr,
                                Value *Passthru, Value *Mask, bool Aligned) {}

static Value *upgradeAbs(IRBuilder<> &Builder, CallBase &CI) {}

static Value *upgradePMULDQ(IRBuilder<> &Builder, CallBase &CI, bool IsSigned) {}

// Apply a mask to a vector of i1's and make sure the result is at least 8 bits
// wide.
static Value *applyX86MaskOn1BitsVec(IRBuilder<> &Builder, Value *Vec,
                                     Value *Mask) {}
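
// Illustrative sketch (hypothetical helper): comparison results are <N x i1>
// vectors, but the legacy intrinsics return an integer mask of at least 8
// bits, so narrow vectors are padded by a shuffle before the final bitcast.
// This mirrors the idea only; the in-tree code may differ.
static Value *sketchApplyMaskAndWiden(IRBuilder<> &Builder, Value *Vec,
                                      Value *Mask) {
  unsigned NumElts = cast<FixedVectorType>(Vec->getType())->getNumElements();

  // AND in the caller-provided mask unless it is trivially all-ones.
  if (Mask) {
    const auto *C = dyn_cast<Constant>(Mask);
    if (!C || !C->isAllOnesValue())
      Vec = Builder.CreateAnd(Vec, getX86MaskVec(Builder, Mask, NumElts));
  }

  // Pad <2 x i1>/<4 x i1> up to <8 x i1> so the result bitcasts to i8.
  if (NumElts < 8) {
    int Indices[8];
    for (unsigned I = 0; I != NumElts; ++I)
      Indices[I] = I;
    for (unsigned I = NumElts; I != 8; ++I)
      Indices[I] = NumElts + I % NumElts; // Picks zeroes from the 2nd operand.
    Vec = Builder.CreateShuffleVector(
        Vec, Constant::getNullValue(Vec->getType()), Indices);
  }

  // Reinterpret the i1 vector as an iN integer mask (N >= 8).
  return Builder.CreateBitCast(Vec, Builder.getIntNTy(std::max(NumElts, 8u)));
}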

static Value *upgradeMaskedCompare(IRBuilder<> &Builder, CallBase &CI,
                                   unsigned CC, bool Signed) {}

// Replace a masked intrinsic with an older unmasked intrinsic.
static Value *upgradeX86MaskedShift(IRBuilder<> &Builder, CallBase &CI,
                                    Intrinsic::ID IID) {}
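
// Illustrative sketch (hypothetical helper): the masked-shift upgrade simply
// re-emits the older unmasked intrinsic and then blends the result with the
// passthru operand under the mask. The exact in-tree helper may differ.
static Value *sketchMaskedShiftToUnmasked(IRBuilder<> &Builder, CallBase &CI,
                                          Intrinsic::ID IID) {
  // Operands of the masked form: (a, shift-amount, passthru, mask).
  Value *Res = Builder.CreateIntrinsic(
      IID, {}, {CI.getArgOperand(0), CI.getArgOperand(1)});
  return emitX86Select(Builder, CI.getArgOperand(3), Res, CI.getArgOperand(2));
}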

static Value *upgradeMaskedMove(IRBuilder<> &Builder, CallBase &CI) {}

static Value *upgradeMaskToInt(IRBuilder<> &Builder, CallBase &CI) {}

// Replace intrinsic with unmasked version and a select.
static bool upgradeAVX512MaskToSelect(StringRef Name, IRBuilder<> &Builder,
                                      CallBase &CI, Value *&Rep) {}

/// Upgrade the comment in a call to inline asm that represents an objc
/// retain/release marker.
void llvm::UpgradeInlineAsmString(std::string *AsmStr) {}

static Value *upgradeNVVMIntrinsicCall(StringRef Name, CallBase *CI,
                                       Function *F, IRBuilder<> &Builder) {}

static Value *upgradeX86IntrinsicCall(StringRef Name, CallBase *CI, Function *F,
                                      IRBuilder<> &Builder) {}

static Value *upgradeAArch64IntrinsicCall(StringRef Name, CallBase *CI,
                                          Function *F, IRBuilder<> &Builder) {}

static Value *upgradeARMIntrinsicCall(StringRef Name, CallBase *CI, Function *F,
                                      IRBuilder<> &Builder) {}

// These are expected to have the arguments:
// atomic.intrin (ptr, rmw_value, ordering, scope, isVolatile)
//
// Except for int_amdgcn_ds_fadd_v2bf16, which only has (ptr, rmw_value).
//
static Value *upgradeAMDGCNIntrinsicCall(StringRef Name, CallBase *CI,
                                         Function *F, IRBuilder<> &Builder) {}
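
// Illustrative sketch (hypothetical helper, deliberately simplified): the
// essence of this upgrade is replacing the old intrinsic call with a native
// atomicrmw instruction. The real code also decodes the ordering, scope and
// volatility operands; here they are hard-wired to keep the example short.
static Value *sketchAtomicIntrinsicToAtomicRMW(CallBase *CI,
                                               IRBuilder<> &Builder) {
  Value *Ptr = CI->getArgOperand(0);
  Value *Val = CI->getArgOperand(1);

  // Assumption: an fadd-style RMW with monotonic ordering and system scope.
  AtomicRMWInst *RMW = Builder.CreateAtomicRMW(
      AtomicRMWInst::FAdd, Ptr, Val, /*Align=*/std::nullopt,
      AtomicOrdering::Monotonic, SyncScope::System);
  return RMW;
}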

/// Helper to unwrap intrinsic call MetadataAsValue operands.
template <typename MDType>
static MDType *unwrapMAVOp(CallBase *CI, unsigned Op) {}
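
// Illustrative sketch (hypothetical helper): debug intrinsics wrap their
// metadata operands in MetadataAsValue, so "unwrapping" is a pair of
// dyn_casts. The in-tree template may differ slightly.
template <typename MDType>
static MDType *sketchUnwrapMAVOperand(CallBase *CI, unsigned Op) {
  if (auto *MAV = dyn_cast<MetadataAsValue>(CI->getArgOperand(Op)))
    return dyn_cast<MDType>(MAV->getMetadata());
  return nullptr;
}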

/// Convert debug intrinsic calls to non-instruction debug records.
/// \p Name - Final part of the intrinsic name, e.g. 'value' in llvm.dbg.value.
/// \p CI - The debug intrinsic call.
static void upgradeDbgIntrinsicToDbgRecord(StringRef Name, CallBase *CI) {}

/// Upgrade a call to an old intrinsic. All argument and return casting must be
/// provided to seamlessly integrate with existing context.
void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {}

void llvm::UpgradeCallsToIntrinsic(Function *F) {}

MDNode *llvm::UpgradeTBAANode(MDNode &MD) {}

Instruction *llvm::UpgradeBitCastInst(unsigned Opc, Value *V, Type *DestTy,
                                      Instruction *&Temp) {}

Constant *llvm::UpgradeBitCastExpr(unsigned Opc, Constant *C, Type *DestTy) {}

/// Check the debug info version number; if it is out-dated, drop the debug
/// info. Return true if the module is modified.
bool llvm::UpgradeDebugInfo(Module &M) {}
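
// Illustrative sketch (hypothetical helper, simplified): the core of the
// version check is comparing the module's "Debug Info Version" flag against
// DEBUG_METADATA_VERSION and stripping the debug info on a mismatch. The real
// routine additionally runs the verifier and honours the
// -disable-auto-upgrade-debug-info flag.
static bool sketchDropOutdatedDebugInfo(Module &M) {
  unsigned Version = getDebugMetadataVersionFromModule(M);
  if (Version == DEBUG_METADATA_VERSION)
    return false; // Up to date; nothing to do.

  // Out-dated (or missing) debug info: drop it and report what happened.
  bool Modified = StripDebugInfo(M);
  if (Modified)
    M.getContext().diagnose(DiagnosticInfoDebugMetadataVersion(M, Version));
  return Modified;
}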

/// This checks for the objc retain/release marker which should be upgraded. It
/// returns true if the module is modified.
static bool upgradeRetainReleaseMarker(Module &M) {}

void llvm::UpgradeARCRuntime(Module &M) {}

bool llvm::UpgradeModuleFlags(Module &M) {}

void llvm::UpgradeSectionAttributes(Module &M) {}

namespace {
// Prior to LLVM 10.0, the strictfp attribute could be used on individual
// callsites within a function that did not also have the strictfp attribute.
// Since 10.0, if strict FP semantics are needed within a function, the
// function must have the strictfp attribute and all calls within the function
// must also have the strictfp attribute. This latter restriction is
// necessary to prevent unwanted libcall simplification when a function is
// being cloned (such as for inlining).
//
// The "dangling" strictfp attribute usage was only used to prevent constant
// folding and other libcall simplification. The nobuiltin attribute on the
// callsite has the same effect.
struct StrictFPUpgradeVisitor : public InstVisitor<StrictFPUpgradeVisitor> {};
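
// Illustrative sketch (hypothetical visitor, not the original): the upgrade
// described above can be implemented by visiting every call site and, for
// calls that carry strictfp without being constrained-FP intrinsics, swapping
// the attribute for nobuiltin. The in-tree visitor may differ in details.
struct SketchStrictFPUpgradeVisitor
    : public InstVisitor<SketchStrictFPUpgradeVisitor> {
  void visitCallBase(CallBase &Call) {
    if (!Call.isStrictFP())
      return;
    // Constrained FP intrinsics legitimately keep strictfp.
    if (isa<ConstrainedFPIntrinsic>(&Call))
      return;
    // Dangling use: replace strictfp with nobuiltin, which preserves the
    // original intent of blocking libcall simplification.
    Call.removeFnAttr(Attribute::StrictFP);
    Call.addFnAttr(Attribute::NoBuiltin);
  }
};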

/// Replace "amdgpu-unsafe-fp-atomics" metadata with atomicrmw metadata
struct AMDGPUUnsafeFPAtomicsUpgradeVisitor
    : public InstVisitor<AMDGPUUnsafeFPAtomicsUpgradeVisitor> {};
} // namespace

void llvm::UpgradeFunctionAttributes(Function &F) {}

static bool isOldLoopArgument(Metadata *MD) {}

static MDString *upgradeLoopTag(LLVMContext &C, StringRef OldTag) {}
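
// Illustrative sketch (hypothetical helper): the legacy "llvm.vectorizer.*"
// loop hints map onto the current "llvm.loop.vectorize.*" names, with the old
// unroll hint becoming the interleave count. This shows the mapping only; the
// in-tree helper may differ.
static MDString *sketchUpgradeLoopTag(LLVMContext &C, StringRef OldTag) {
  StringRef OldPrefix = "llvm.vectorizer.";
  assert(OldTag.starts_with(OldPrefix) && "Expected llvm.vectorizer.* tag");

  if (OldTag == "llvm.vectorizer.unroll")
    return MDString::get(C, "llvm.loop.interleave.count");

  std::string NewTag = "llvm.loop.vectorize.";
  NewTag += OldTag.drop_front(OldPrefix.size()).str();
  return MDString::get(C, NewTag);
}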

static Metadata *upgradeLoopArgument(Metadata *MD) {}

MDNode *llvm::upgradeInstructionLoopAttachment(MDNode &N) {}

std::string llvm::UpgradeDataLayoutString(StringRef DL, StringRef TT) {}

void llvm::UpgradeAttributes(AttrBuilder &B) {}

void llvm::UpgradeOperandBundles(std::vector<OperandBundleDef> &Bundles) {}