llvm/lib/Target/SPIRV/SPIRVInstructionSelector.cpp

//===- SPIRVInstructionSelector.cpp ------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the targeting of the InstructionSelector class for
// SPIRV.
// TODO: This should be generated by TableGen.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/SPIRVBaseInfo.h"
#include "MCTargetDesc/SPIRVMCTargetDesc.h"
#include "SPIRV.h"
#include "SPIRVGlobalRegistry.h"
#include "SPIRVInstrInfo.h"
#include "SPIRVRegisterBankInfo.h"
#include "SPIRVRegisterInfo.h"
#include "SPIRVTargetMachine.h"
#include "SPIRVUtils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfoImpls.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/IR/IntrinsicsSPIRV.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "spirv-isel"

using namespace llvm;
namespace CL = SPIRV::OpenCLExtInst;
namespace GL = SPIRV::GLSLExtInst;

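// A list of (extended instruction set, opcode) alternatives; selection tries
// them in order and emits the first pair whose instruction set the subtarget
// can use.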
using ExtInstList =
    std::vector<std::pair<SPIRV::InstructionSet::InstructionSet, uint32_t>>;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class SPIRVInstructionSelector : public InstructionSelector {
  const SPIRVSubtarget &STI;
  const SPIRVInstrInfo &TII;
  const SPIRVRegisterInfo &TRI;
  const RegisterBankInfo &RBI;
  SPIRVGlobalRegistry &GR;
  MachineRegisterInfo *MRI;
  MachineFunction *HasVRegsReset = nullptr;

  /// We need to keep track of the number we give to anonymous global values to
  /// generate the same name every time it is needed.
  mutable DenseMap<const GlobalValue *, unsigned> UnnamedGlobalIDs;

public:
  SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                           const SPIRVSubtarget &ST,
                           const RegisterBankInfo &RBI);
  void setupMF(MachineFunction &MF, GISelKnownBits *KB,
               CodeGenCoverage *CoverageInfo, ProfileSummaryInfo *PSI,
               BlockFrequencyInfo *BFI) override;
  // Common selection code. Instruction-specific selection occurs in spvSelect.
  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

#define GET_GLOBALISEL_PREDICATES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL

private:
  void resetVRegsType(MachineFunction &MF);

  // tblgen-erated 'select' implementation, used as the initial selector for
  // the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // All instruction-specific selection that didn't happen in "select()".
  // Basically a large switch/case delegating to all the other select methods.
  bool spvSelect(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectGlobalValue(Register ResVReg, MachineInstr &I,
                         const MachineInstr *Init = nullptr) const;

  bool selectUnOpWithSrc(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I, Register SrcReg,
                         unsigned Opcode) const;
  bool selectUnOp(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  unsigned Opcode) const;

  bool selectBitcast(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I) const;

  bool selectLoad(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectStore(MachineInstr &I) const;

  bool selectStackSave(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectStackRestore(MachineInstr &I) const;

  bool selectMemOperation(Register ResVReg, MachineInstr &I) const;

  bool selectAtomicRMW(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I, unsigned NewOpcode,
                       unsigned NegateOpcode = 0) const;

  bool selectAtomicCmpXchg(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectFence(MachineInstr &I) const;

  bool selectAddrSpaceCast(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectAnyOrAll(Register ResVReg, const SPIRVType *ResType,
                      MachineInstr &I, unsigned OpType) const;

  bool selectAll(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectAny(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectBitreverse(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;

  bool selectBuildVector(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;
  bool selectSplatVector(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectCmp(Register ResVReg, const SPIRVType *ResType,
                 unsigned comparisonOpcode, MachineInstr &I) const;

  bool selectICmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectFCmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;

  bool selectSign(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;

  bool selectFloatDot(Register ResVReg, const SPIRVType *ResType,
                      MachineInstr &I) const;

  bool selectOverflowArith(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I, unsigned Opcode) const;

  bool selectIntegerDot(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;

  void renderImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
                   int OpIdx) const;
  void renderFImm64(MachineInstrBuilder &MIB, const MachineInstr &I,
                    int OpIdx) const;

  bool selectConst(Register ResVReg, const SPIRVType *ResType, const APInt &Imm,
                   MachineInstr &I) const;

  bool selectSelect(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                    bool IsSigned) const;
  bool selectIToF(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  bool IsSigned, unsigned Opcode) const;
  bool selectExt(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                 bool IsSigned) const;

  bool selectTrunc(Register ResVReg, const SPIRVType *ResType,
                   MachineInstr &I) const;

  bool selectIntToBool(Register IntReg, Register ResVReg, MachineInstr &I,
                       const SPIRVType *intTy, const SPIRVType *boolTy) const;

  bool selectOpUndef(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I) const;
  bool selectFreeze(Register ResVReg, const SPIRVType *ResType,
                    MachineInstr &I) const;
  bool selectIntrinsic(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectExtractVal(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertVal(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectExtractElt(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertElt(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectGEP(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectFrameIndex(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectAllocaArray(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectBranch(MachineInstr &I) const;
  bool selectBranchCond(MachineInstr &I) const;

  bool selectPhi(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, CL::OpenCLExtInst CLInst) const;
  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, CL::OpenCLExtInst CLInst,
                     GL::GLSLExtInst GLInst) const;
  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, const ExtInstList &ExtInsts) const;

  bool selectLog10(Register ResVReg, const SPIRVType *ResType,
                   MachineInstr &I) const;

  bool selectSaturate(Register ResVReg, const SPIRVType *ResType,
                      MachineInstr &I) const;

  bool selectSpvThreadId(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectUnmergeValues(MachineInstr &I) const;

  // Utilities
  Register buildI32Constant(uint32_t Val, MachineInstr &I,
                            const SPIRVType *ResType = nullptr) const;

  Register buildZerosVal(const SPIRVType *ResType, MachineInstr &I) const;
  Register buildZerosValF(const SPIRVType *ResType, MachineInstr &I) const;
  Register buildOnesVal(bool AllOnes, const SPIRVType *ResType,
                        MachineInstr &I) const;
  Register buildOnesValF(const SPIRVType *ResType, MachineInstr &I) const;

  bool wrapIntoSpecConstantOp(MachineInstr &I,
                              SmallVector<Register> &CompositeArgs) const;

  Register getUcharPtrTypeReg(MachineInstr &I,
                              SPIRV::StorageClass::StorageClass SC) const;
  MachineInstrBuilder buildSpecConstantOp(MachineInstr &I, Register Dest,
                                          Register Src, Register DestType,
                                          uint32_t Opcode) const;
  MachineInstrBuilder buildConstGenericPtr(MachineInstr &I, Register SrcPtr,
                                           SPIRVType *SrcPtrTy) const;
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

SPIRVInstructionSelector::SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                                                   const SPIRVSubtarget &ST,
                                                   const RegisterBankInfo &RBI)
    : InstructionSelector(), STI(ST), TII(*ST.getInstrInfo()),
      TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

void SPIRVInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
                                       CodeGenCoverage *CoverageInfo,
                                       ProfileSummaryInfo *PSI,
                                       BlockFrequencyInfo *BFI) {
  MRI = &MF.getRegInfo();
  GR.setCurrentFunc(MF);
  InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
}

// Ensure that register classes correspond to pattern matching rules.
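// All virtual registers are given a uniform 64-bit LLT here: SPIR-V operands
// are abstract IDs rather than sized machine registers, so a single canonical
// type keeps the tblgen-erated patterns applicable to every vreg.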
void SPIRVInstructionSelector::resetVRegsType(MachineFunction &MF) {
  if (HasVRegsReset == &MF)
    return;
  HasVRegsReset = &MF;

  MachineRegisterInfo &MRI = MF.getRegInfo();
  for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
    Register Reg = Register::index2VirtReg(I);
    LLT RegType = MRI.getType(Reg);
    if (RegType.isScalar())
      MRI.setType(Reg, LLT::scalar(64));
    else if (RegType.isPointer())
      MRI.setType(Reg, LLT::pointer(0, 64));
    else if (RegType.isVector())
      MRI.setType(Reg, LLT::fixed_vector(2, LLT::scalar(64)));
  }
  for (const auto &MBB : MF) {
    for (const auto &MI : MBB) {
      if (MI.getOpcode() != SPIRV::ASSIGN_TYPE)
        continue;
      Register DstReg = MI.getOperand(0).getReg();
      LLT DstType = MRI.getType(DstReg);
      Register SrcReg = MI.getOperand(1).getReg();
      LLT SrcType = MRI.getType(SrcReg);
      if (DstType != SrcType)
        MRI.setType(DstReg, MRI.getType(SrcReg));

      const TargetRegisterClass *DstRC = MRI.getRegClassOrNull(DstReg);
      const TargetRegisterClass *SrcRC = MRI.getRegClassOrNull(SrcReg);
      if (DstRC != SrcRC && SrcRC)
        MRI.setRegClass(DstReg, SrcRC);
    }
  }
}

static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI);

// Defined in SPIRVLegalizerInfo.cpp.
extern bool isTypeFoldingSupported(unsigned Opcode);

bool SPIRVInstructionSelector::select(MachineInstr &I) {
  resetVRegsType(*I.getParent()->getParent());

  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  unsigned Opcode = I.getOpcode();
  // If it's not a GMIR instruction, we've selected it already.
  if (!isPreISelGenericOpcode(Opcode)) {
    if (Opcode == SPIRV::ASSIGN_TYPE) { // These pseudos aren't needed any more.
      Register DstReg = I.getOperand(0).getReg();
      Register SrcReg = I.getOperand(1).getReg();
      auto *Def = MRI->getVRegDef(SrcReg);
      if (isTypeFoldingSupported(Def->getOpcode())) {
        bool Res = selectImpl(I, *CoverageInfo);
        LLVM_DEBUG({
          if (!Res && Def->getOpcode() != TargetOpcode::G_CONSTANT) {
            dbgs() << "Unexpected pattern in ASSIGN_TYPE.\nInstruction: ";
            I.print(dbgs());
          }
        });
        assert(Res || Def->getOpcode() == TargetOpcode::G_CONSTANT);
        if (Res)
          return Res;
      }
      MRI->setRegClass(SrcReg, MRI->getRegClass(DstReg));
      MRI->replaceRegWith(SrcReg, DstReg);
      I.removeFromParent();
      return true;
    } else if (I.getNumDefs() == 1) {
      // Make all vregs 64 bits (for SPIR-V IDs).
      MRI->setType(I.getOperand(0).getReg(), LLT::scalar(64));
    }
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  if (I.getNumOperands() != I.getNumExplicitOperands()) {
    LLVM_DEBUG(errs() << "Generic instr has unexpected implicit operands\n");
    return false;
  }

  // Common code for getting return reg+type, and removing selected instr
  // from parent occurs here. Instr-specific selection happens in spvSelect().
  bool HasDefs = I.getNumDefs() > 0;
  Register ResVReg = HasDefs ? I.getOperand(0).getReg() : Register(0);
  SPIRVType *ResType = HasDefs ? GR.getSPIRVTypeForVReg(ResVReg) : nullptr;
  assert(!HasDefs || ResType || I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
  if (spvSelect(ResVReg, ResType, I)) {
    if (HasDefs) // Make all vregs 64 bits (for SPIR-V IDs).
      for (unsigned i = 0; i < I.getNumDefs(); ++i)
        MRI->setType(I.getOperand(i).getReg(), LLT::scalar(64));
    I.removeFromParent();
    return true;
  }
  return false;
}

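// Returns true if the tblgen-erated selector (selectImpl) should be tried for
// this opcode before any custom C++ selection. G_CONSTANT is excluded so that
// selectConst() can emit the OpConstant* forms directly.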
static bool mayApplyGenericSelection(unsigned Opcode) {
  switch (Opcode) {
  case TargetOpcode::G_CONSTANT:
    return false;
  case TargetOpcode::G_SADDO:
  case TargetOpcode::G_SSUBO:
    return true;
  }
  return isTypeFoldingSupported(Opcode);
}

bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  const unsigned Opcode = I.getOpcode();
  if (mayApplyGenericSelection(Opcode))
    return selectImpl(I, *CoverageInfo);
  switch (Opcode) {
  case TargetOpcode::G_CONSTANT:
    return selectConst(ResVReg, ResType, I.getOperand(1).getCImm()->getValue(),
                       I);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(ResVReg, I);
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectOpUndef(ResVReg, ResType, I);
  case TargetOpcode::G_FREEZE:
    return selectFreeze(ResVReg, ResType, I);

  case TargetOpcode::G_INTRINSIC:
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
  case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
    return selectIntrinsic(ResVReg, ResType, I);
  case TargetOpcode::G_BITREVERSE:
    return selectBitreverse(ResVReg, ResType, I);

  case TargetOpcode::G_BUILD_VECTOR:
    return selectBuildVector(ResVReg, ResType, I);
  case TargetOpcode::G_SPLAT_VECTOR:
    return selectSplatVector(ResVReg, ResType, I);

  case TargetOpcode::G_SHUFFLE_VECTOR: {
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorShuffle))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addUse(I.getOperand(1).getReg())
                   .addUse(I.getOperand(2).getReg());
    for (auto V : I.getOperand(3).getShuffleMask())
      MIB.addImm(V);
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case TargetOpcode::G_MEMMOVE:
  case TargetOpcode::G_MEMCPY:
  case TargetOpcode::G_MEMSET:
    return selectMemOperation(ResVReg, I);

  case TargetOpcode::G_ICMP:
    return selectICmp(ResVReg, ResType, I);
  case TargetOpcode::G_FCMP:
    return selectFCmp(ResVReg, ResType, I);

  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndex(ResVReg, ResType, I);

  case TargetOpcode::G_LOAD:
    return selectLoad(ResVReg, ResType, I);
  case TargetOpcode::G_STORE:
    return selectStore(I);

  case TargetOpcode::G_BR:
    return selectBranch(I);
  case TargetOpcode::G_BRCOND:
    return selectBranchCond(I);

  case TargetOpcode::G_PHI:
    return selectPhi(ResVReg, ResType, I);

  case TargetOpcode::G_FPTOSI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToS);
  case TargetOpcode::G_FPTOUI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToU);

  case TargetOpcode::G_SITOFP:
    return selectIToF(ResVReg, ResType, I, true, SPIRV::OpConvertSToF);
  case TargetOpcode::G_UITOFP:
    return selectIToF(ResVReg, ResType, I, false, SPIRV::OpConvertUToF);

  case TargetOpcode::G_CTPOP:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitCount);
  case TargetOpcode::G_SMIN:
    return selectExtInst(ResVReg, ResType, I, CL::s_min, GL::SMin);
  case TargetOpcode::G_UMIN:
    return selectExtInst(ResVReg, ResType, I, CL::u_min, GL::UMin);

  case TargetOpcode::G_SMAX:
    return selectExtInst(ResVReg, ResType, I, CL::s_max, GL::SMax);
  case TargetOpcode::G_UMAX:
    return selectExtInst(ResVReg, ResType, I, CL::u_max, GL::UMax);

  case TargetOpcode::G_FMA:
    return selectExtInst(ResVReg, ResType, I, CL::fma, GL::Fma);

  case TargetOpcode::G_FPOW:
    return selectExtInst(ResVReg, ResType, I, CL::pow, GL::Pow);
  case TargetOpcode::G_FPOWI:
    return selectExtInst(ResVReg, ResType, I, CL::pown);

  case TargetOpcode::G_FEXP:
    return selectExtInst(ResVReg, ResType, I, CL::exp, GL::Exp);
  case TargetOpcode::G_FEXP2:
    return selectExtInst(ResVReg, ResType, I, CL::exp2, GL::Exp2);

  case TargetOpcode::G_FLOG:
    return selectExtInst(ResVReg, ResType, I, CL::log, GL::Log);
  case TargetOpcode::G_FLOG2:
    return selectExtInst(ResVReg, ResType, I, CL::log2, GL::Log2);
  case TargetOpcode::G_FLOG10:
    return selectLog10(ResVReg, ResType, I);

  case TargetOpcode::G_FABS:
    return selectExtInst(ResVReg, ResType, I, CL::fabs, GL::FAbs);
  case TargetOpcode::G_ABS:
    return selectExtInst(ResVReg, ResType, I, CL::s_abs, GL::SAbs);

  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMINIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmin, GL::NMin);
  case TargetOpcode::G_FMAXNUM:
  case TargetOpcode::G_FMAXIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmax, GL::NMax);

  case TargetOpcode::G_FCOPYSIGN:
    return selectExtInst(ResVReg, ResType, I, CL::copysign);

  case TargetOpcode::G_FCEIL:
    return selectExtInst(ResVReg, ResType, I, CL::ceil, GL::Ceil);
  case TargetOpcode::G_FFLOOR:
    return selectExtInst(ResVReg, ResType, I, CL::floor, GL::Floor);

  case TargetOpcode::G_FCOS:
    return selectExtInst(ResVReg, ResType, I, CL::cos, GL::Cos);
  case TargetOpcode::G_FSIN:
    return selectExtInst(ResVReg, ResType, I, CL::sin, GL::Sin);
  case TargetOpcode::G_FTAN:
    return selectExtInst(ResVReg, ResType, I, CL::tan, GL::Tan);
  case TargetOpcode::G_FACOS:
    return selectExtInst(ResVReg, ResType, I, CL::acos, GL::Acos);
  case TargetOpcode::G_FASIN:
    return selectExtInst(ResVReg, ResType, I, CL::asin, GL::Asin);
  case TargetOpcode::G_FATAN:
    return selectExtInst(ResVReg, ResType, I, CL::atan, GL::Atan);
  case TargetOpcode::G_FATAN2:
    return selectExtInst(ResVReg, ResType, I, CL::atan2, GL::Atan2);
  case TargetOpcode::G_FCOSH:
    return selectExtInst(ResVReg, ResType, I, CL::cosh, GL::Cosh);
  case TargetOpcode::G_FSINH:
    return selectExtInst(ResVReg, ResType, I, CL::sinh, GL::Sinh);
  case TargetOpcode::G_FTANH:
    return selectExtInst(ResVReg, ResType, I, CL::tanh, GL::Tanh);

  case TargetOpcode::G_FSQRT:
    return selectExtInst(ResVReg, ResType, I, CL::sqrt, GL::Sqrt);

  case TargetOpcode::G_CTTZ:
  case TargetOpcode::G_CTTZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::ctz);
  case TargetOpcode::G_CTLZ:
  case TargetOpcode::G_CTLZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::clz);

  case TargetOpcode::G_INTRINSIC_ROUND:
    return selectExtInst(ResVReg, ResType, I, CL::round, GL::Round);
  case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);
  case TargetOpcode::G_INTRINSIC_TRUNC:
    return selectExtInst(ResVReg, ResType, I, CL::trunc, GL::Trunc);
  case TargetOpcode::G_FRINT:
  case TargetOpcode::G_FNEARBYINT:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);

  case TargetOpcode::G_SMULH:
    return selectExtInst(ResVReg, ResType, I, CL::s_mul_hi);
  case TargetOpcode::G_UMULH:
    return selectExtInst(ResVReg, ResType, I, CL::u_mul_hi);

  case TargetOpcode::G_SADDSAT:
    return selectExtInst(ResVReg, ResType, I, CL::s_add_sat);
  case TargetOpcode::G_UADDSAT:
    return selectExtInst(ResVReg, ResType, I, CL::u_add_sat);
  case TargetOpcode::G_SSUBSAT:
    return selectExtInst(ResVReg, ResType, I, CL::s_sub_sat);
  case TargetOpcode::G_USUBSAT:
    return selectExtInst(ResVReg, ResType, I, CL::u_sub_sat);

  case TargetOpcode::G_UADDO:
    return selectOverflowArith(ResVReg, ResType, I,
                               ResType->getOpcode() == SPIRV::OpTypeVector
                                   ? SPIRV::OpIAddCarryV
                                   : SPIRV::OpIAddCarryS);
  case TargetOpcode::G_USUBO:
    return selectOverflowArith(ResVReg, ResType, I,
                               ResType->getOpcode() == SPIRV::OpTypeVector
                                   ? SPIRV::OpISubBorrowV
                                   : SPIRV::OpISubBorrowS);
  case TargetOpcode::G_UMULO:
    return selectOverflowArith(ResVReg, ResType, I, SPIRV::OpUMulExtended);
  case TargetOpcode::G_SMULO:
    return selectOverflowArith(ResVReg, ResType, I, SPIRV::OpSMulExtended);

  case TargetOpcode::G_SEXT:
    return selectExt(ResVReg, ResType, I, true);
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_ZEXT:
    return selectExt(ResVReg, ResType, I, false);
  case TargetOpcode::G_TRUNC:
    return selectTrunc(ResVReg, ResType, I);
  case TargetOpcode::G_FPTRUNC:
  case TargetOpcode::G_FPEXT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpFConvert);

  case TargetOpcode::G_PTRTOINT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertPtrToU);
  case TargetOpcode::G_INTTOPTR:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertUToPtr);
  case TargetOpcode::G_BITCAST:
    return selectBitcast(ResVReg, ResType, I);
  case TargetOpcode::G_ADDRSPACE_CAST:
    return selectAddrSpaceCast(ResVReg, ResType, I);
  case TargetOpcode::G_PTR_ADD: {
    // Currently, G_PTR_ADD is applied only to global variables.
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    Register GV = I.getOperand(1).getReg();
    MachineRegisterInfo::def_instr_iterator II = MRI->def_instr_begin(GV);
    (void)II;
    assert(((*II).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
            (*II).getOpcode() == TargetOpcode::COPY ||
            (*II).getOpcode() == SPIRV::OpVariable) &&
           isImm(I.getOperand(2), MRI));
    // It may be the initialization of a global variable.
    bool IsGVInit = false;
    for (MachineRegisterInfo::use_instr_iterator
             UseIt = MRI->use_instr_begin(I.getOperand(0).getReg()),
             UseEnd = MRI->use_instr_end();
         UseIt != UseEnd; UseIt = std::next(UseIt)) {
      if ((*UseIt).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
          (*UseIt).getOpcode() == SPIRV::OpVariable) {
        IsGVInit = true;
        break;
      }
    }
    MachineBasicBlock &BB = *I.getParent();
    if (!IsGVInit) {
      SPIRVType *GVType = GR.getSPIRVTypeForVReg(GV);
      SPIRVType *GVPointeeType = GR.getPointeeType(GVType);
      SPIRVType *ResPointeeType = GR.getPointeeType(ResType);
      if (GVPointeeType && ResPointeeType && GVPointeeType != ResPointeeType) {
        // Build a new virtual register that is associated with the required
        // data type.
        Register NewVReg = MRI->createGenericVirtualRegister(MRI->getType(GV));
        MRI->setRegClass(NewVReg, MRI->getRegClass(GV));
        // Having a correctly typed base, we are ready to build the actually
        // required GEP. It may not be a constant though, because all operands
        // of OpSpecConstantOp must originate from other constant instructions,
        // and only the AccessChain opcodes accept a global OpVariable
        // instruction. We can't use an AccessChain opcode here because of the
        // type mismatch between the result and base types.
        if (!GR.isBitcastCompatible(ResType, GVType))
          report_fatal_error(
              "incompatible result and operand types in a bitcast");
        Register ResTypeReg = GR.getSPIRVTypeID(ResType);
        MachineInstrBuilder MIB =
            BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpBitcast))
                .addDef(NewVReg)
                .addUse(ResTypeReg)
                .addUse(GV);
        return MIB.constrainAllUses(TII, TRI, RBI) &&
               BuildMI(BB, I, I.getDebugLoc(),
                       TII.get(STI.isVulkanEnv()
                                   ? SPIRV::OpInBoundsAccessChain
                                   : SPIRV::OpInBoundsPtrAccessChain))
                   .addDef(ResVReg)
                   .addUse(ResTypeReg)
                   .addUse(NewVReg)
                   .addUse(I.getOperand(2).getReg())
                   .constrainAllUses(TII, TRI, RBI);
      } else {
        return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addImm(
                static_cast<uint32_t>(SPIRV::Opcode::InBoundsPtrAccessChain))
            .addUse(GV)
            .addUse(I.getOperand(2).getReg())
            .constrainAllUses(TII, TRI, RBI);
      }
    }
    // It's possible to translate G_PTR_ADD to OpSpecConstantOp: either to
    // initialize a global variable with a constant expression (e.g., the test
    // case opencl/basic/progvar_prog_scope_init.ll), or for another use case.
    Register Idx = buildZerosVal(GR.getOrCreateSPIRVIntegerType(32, I, TII), I);
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addImm(static_cast<uint32_t>(
                       SPIRV::Opcode::InBoundsPtrAccessChain))
                   .addUse(GV)
                   .addUse(Idx)
                   .addUse(I.getOperand(2).getReg());
    return MIB.constrainAllUses(TII, TRI, RBI);
  }

  case TargetOpcode::G_ATOMICRMW_OR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicOr);
  case TargetOpcode::G_ATOMICRMW_ADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicIAdd);
  case TargetOpcode::G_ATOMICRMW_AND:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicAnd);
  case TargetOpcode::G_ATOMICRMW_MAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMax);
  case TargetOpcode::G_ATOMICRMW_MIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMin);
  case TargetOpcode::G_ATOMICRMW_SUB:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicISub);
  case TargetOpcode::G_ATOMICRMW_XOR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicXor);
  case TargetOpcode::G_ATOMICRMW_UMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMax);
  case TargetOpcode::G_ATOMICRMW_UMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMin);
  case TargetOpcode::G_ATOMICRMW_XCHG:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicExchange);
  case TargetOpcode::G_ATOMIC_CMPXCHG:
    return selectAtomicCmpXchg(ResVReg, ResType, I);

  case TargetOpcode::G_ATOMICRMW_FADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFAddEXT);
  case TargetOpcode::G_ATOMICRMW_FSUB:
    // Translate G_ATOMICRMW_FSUB to OpAtomicFAddEXT with negative value operand
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFAddEXT,
                           SPIRV::OpFNegate);
  case TargetOpcode::G_ATOMICRMW_FMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFMinEXT);
  case TargetOpcode::G_ATOMICRMW_FMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFMaxEXT);

  case TargetOpcode::G_FENCE:
    return selectFence(I);

  case TargetOpcode::G_STACKSAVE:
    return selectStackSave(ResVReg, ResType, I);
  case TargetOpcode::G_STACKRESTORE:
    return selectStackRestore(I);

  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(I);

  // Discard generic opcodes for intrinsics which we do not expect to actually
  // represent code after lowering, or intrinsics which are not implemented but
  // should not crash when found in a customer's LLVM IR input.
  case TargetOpcode::G_TRAP:
  case TargetOpcode::G_DEBUGTRAP:
  case TargetOpcode::G_UBSANTRAP:
  case TargetOpcode::DBG_LABEL:
    return true;

  default:
    return false;
  }
}

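// The selectExtInst overloads below lower a generic opcode to an OpExtInst
// from whichever extended instruction set the subtarget can use. E.g. on an
// OpenCL-flavoured target, G_SMIN becomes, roughly:
//   %res = OpExtInst %type %opencl_std_set s_min %a %b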
bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst) const {
  return selectExtInst(ResVReg, ResType, I,
                       {{SPIRV::InstructionSet::OpenCL_std, CLInst}});
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst,
                                             GL::GLSLExtInst GLInst) const {
  ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst},
                          {SPIRV::InstructionSet::GLSL_std_450, GLInst}};
  return selectExtInst(ResVReg, ResType, I, ExtInsts);
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             const ExtInstList &Insts) const {
  for (const auto &Ex : Insts) {
    SPIRV::InstructionSet::InstructionSet Set = Ex.first;
    uint32_t Opcode = Ex.second;
    if (STI.canUseExtInstSet(Set)) {
      MachineBasicBlock &BB = *I.getParent();
      auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
                     .addDef(ResVReg)
                     .addUse(GR.getSPIRVTypeID(ResType))
                     .addImm(static_cast<uint32_t>(Set))
                     .addImm(Opcode);
      const unsigned NumOps = I.getNumOperands();
      unsigned Index = 1;
      if (Index < NumOps &&
          I.getOperand(Index).getType() ==
              MachineOperand::MachineOperandType::MO_IntrinsicID)
        Index = 2;
      for (; Index < NumOps; ++Index)
        MIB.add(I.getOperand(Index));
      return MIB.constrainAllUses(TII, TRI, RBI);
    }
  }
  return false;
}

bool SPIRVInstructionSelector::selectUnOpWithSrc(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I,
                                                 Register SrcReg,
                                                 unsigned Opcode) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(SrcReg)
      .constrainAllUses(TII, TRI, RBI);
}

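// Besides the plain unary-op lowering, selectUnOp special-cases ptr<->int
// conversions of global values in the OpenCL environment: those must remain
// constant expressions, so they are wrapped into an OpSpecConstantOp carrying
// the equivalent ConvertPtrToU/ConvertUToPtr opcode instead.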
bool SPIRVInstructionSelector::selectUnOp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I,
                                          unsigned Opcode) const {
  if (STI.isOpenCLEnv() && I.getOperand(1).isReg()) {
    Register SrcReg = I.getOperand(1).getReg();
    bool IsGV = false;
    for (MachineRegisterInfo::def_instr_iterator DefIt =
             MRI->def_instr_begin(SrcReg);
         DefIt != MRI->def_instr_end(); DefIt = std::next(DefIt)) {
      if ((*DefIt).getOpcode() == TargetOpcode::G_GLOBAL_VALUE) {
        IsGV = true;
        break;
      }
    }
    if (IsGV) {
      uint32_t SpecOpcode = 0;
      switch (Opcode) {
      case SPIRV::OpConvertPtrToU:
        SpecOpcode = static_cast<uint32_t>(SPIRV::Opcode::ConvertPtrToU);
        break;
      case SPIRV::OpConvertUToPtr:
        SpecOpcode = static_cast<uint32_t>(SPIRV::Opcode::ConvertUToPtr);
        break;
      }
      if (SpecOpcode)
        return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                       TII.get(SPIRV::OpSpecConstantOp))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addImm(SpecOpcode)
            .addUse(SrcReg)
            .constrainAllUses(TII, TRI, RBI);
    }
  }
  return selectUnOpWithSrc(ResVReg, ResType, I, I.getOperand(1).getReg(),
                           Opcode);
}

bool SPIRVInstructionSelector::selectBitcast(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I) const {
  Register OpReg = I.getOperand(1).getReg();
  SPIRVType *OpType = OpReg.isValid() ? GR.getSPIRVTypeForVReg(OpReg) : nullptr;
  if (!GR.isBitcastCompatible(ResType, OpType))
    report_fatal_error("incompatible result and operand types in a bitcast");
  return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
}

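// Encode a MachineMemOperand as SPIR-V memory-operand literals: a bitmask of
// Volatile/Nontemporal/Aligned, where Aligned is followed by an extra literal
// holding the alignment, e.g. roughly:
//   %res = OpLoad %ty %ptr Volatile|Aligned 4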
static void addMemoryOperands(MachineMemOperand *MemOp,
                              MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (MemOp->isVolatile())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (MemOp->isNonTemporal())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
  if (MemOp->getAlign().value())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None)) {
    MIB.addImm(SpvMemOp);
    if (SpvMemOp & static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned))
      MIB.addImm(MemOp->getAlign().value());
  }
}

static void addMemoryOperands(uint64_t Flags, MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (Flags & MachineMemOperand::Flags::MOVolatile)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (Flags & MachineMemOperand::Flags::MONonTemporal)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None))
    MIB.addImm(SpvMemOp);
}

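// Loads are selected both for G_LOAD and for the spv_load intrinsic; in the
// intrinsic form all operand indices are shifted by one to skip the intrinsic
// ID operand.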
bool SPIRVInstructionSelector::selectLoad(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0;
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(Ptr);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
           I.getOpcode() ==
               TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStore(MachineInstr &I) const {
  unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0;
  Register StoreVal = I.getOperand(0 + OpOffset).getReg();
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpStore))
                 .addUse(Ptr)
                 .addUse(StoreVal);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
           I.getOpcode() ==
               TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStackSave(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
    report_fatal_error(
        "llvm.stacksave intrinsic: this instruction requires the following "
        "SPIR-V extension: SPV_INTEL_variable_length_array",
        false);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSaveMemoryINTEL))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStackRestore(MachineInstr &I) const {
  if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
    report_fatal_error(
        "llvm.stackrestore intrinsic: this instruction requires the following "
        "SPIR-V extension: SPV_INTEL_variable_length_array",
        false);
  if (!I.getOperand(0).isReg())
    return false;
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpRestoreMemoryINTEL))
      .addUse(I.getOperand(0).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

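// G_MEMCPY/G_MEMMOVE/G_MEMSET all lower to OpCopyMemorySized. For G_MEMSET
// with a constant value and size, the fill pattern is first materialized as a
// UniformConstant global array that the copy then reads through a bitcast
// pointer.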
bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg,
                                                  MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  Register SrcReg = I.getOperand(1).getReg();
  if (I.getOpcode() == TargetOpcode::G_MEMSET) {
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    unsigned Val = getIConstVal(I.getOperand(1).getReg(), MRI);
    unsigned Num = getIConstVal(I.getOperand(2).getReg(), MRI);
    SPIRVType *ValTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
    SPIRVType *ArrTy = GR.getOrCreateSPIRVArrayType(ValTy, Num, I, TII);
    Register Const = GR.getOrCreateConstIntArray(Val, Num, I, ArrTy, TII);
    SPIRVType *VarTy = GR.getOrCreateSPIRVPointerType(
        ArrTy, I, TII, SPIRV::StorageClass::UniformConstant);
    // TODO: check if we have such GV, add init, use buildGlobalVariable.
    Function &CurFunction = GR.CurMF->getFunction();
    Type *LLVMArrTy =
        ArrayType::get(IntegerType::get(CurFunction.getContext(), 8), Num);
    // Module takes ownership of the global var.
    GlobalVariable *GV = new GlobalVariable(*CurFunction.getParent(), LLVMArrTy,
                                            true, GlobalValue::InternalLinkage,
                                            Constant::getNullValue(LLVMArrTy));
    Register VarReg = MRI->createGenericVirtualRegister(LLT::scalar(64));
    GR.add(GV, GR.CurMF, VarReg);

    buildOpDecorate(VarReg, I, TII, SPIRV::Decoration::Constant, {});
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
        .addDef(VarReg)
        .addUse(GR.getSPIRVTypeID(VarTy))
        .addImm(SPIRV::StorageClass::UniformConstant)
        .addUse(Const)
        .constrainAllUses(TII, TRI, RBI);
    SPIRVType *SourceTy = GR.getOrCreateSPIRVPointerType(
        ValTy, I, TII, SPIRV::StorageClass::UniformConstant);
    SrcReg = MRI->createGenericVirtualRegister(LLT::scalar(64));
    selectUnOpWithSrc(SrcReg, SourceTy, I, VarReg, SPIRV::OpBitcast);
  }
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCopyMemorySized))
                 .addUse(I.getOperand(0).getReg())
                 .addUse(SrcReg)
                 .addUse(I.getOperand(2).getReg());
  if (I.getNumMemOperands())
    addMemoryOperands(*I.memoperands_begin(), MIB);
  bool Result = MIB.constrainAllUses(TII, TRI, RBI);
  if (ResVReg.isValid() && ResVReg != MIB->getOperand(0).getReg())
    BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY), ResVReg)
        .addUse(MIB->getOperand(0).getReg());
  return Result;
}

bool SPIRVInstructionSelector::selectAtomicRMW(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I,
                                               unsigned NewOpcode,
                                               unsigned NegateOpcode) const {
  assert(I.hasOneMemOperand());
  const MachineMemOperand *MemOp = *I.memoperands_begin();
  uint32_t Scope = static_cast<uint32_t>(getMemScope(
      GR.CurMF->getFunction().getContext(), MemOp->getSyncScopeID()));
  Register ScopeReg = buildI32Constant(Scope, I);

  Register Ptr = I.getOperand(1).getReg();
  // TODO: Changed as it's implemented in the translator. See test/atomicrmw.ll
  // auto ScSem =
  // getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr));
  AtomicOrdering AO = MemOp->getSuccessOrdering();
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem /*| ScSem*/, I);

  bool Result = true;
  Register ValueReg = I.getOperand(2).getReg();
  if (NegateOpcode != 0) {
    // Translation with negative value operand is requested
    Register TmpReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
    Result &= selectUnOpWithSrc(TmpReg, ResType, I, ValueReg, NegateOpcode);
    ValueReg = TmpReg;
  }

  Result &= BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(NewOpcode))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(Ptr)
                .addUse(ScopeReg)
                .addUse(MemSemReg)
                .addUse(ValueReg)
                .constrainAllUses(TII, TRI, RBI);
  return Result;
}

bool SPIRVInstructionSelector::selectUnmergeValues(MachineInstr &I) const {
  unsigned ArgI = I.getNumOperands() - 1;
  Register SrcReg =
      I.getOperand(ArgI).isReg() ? I.getOperand(ArgI).getReg() : Register(0);
  SPIRVType *DefType =
      SrcReg.isValid() ? GR.getSPIRVTypeForVReg(SrcReg) : nullptr;
  if (!DefType || DefType->getOpcode() != SPIRV::OpTypeVector)
    report_fatal_error(
        "cannot select G_UNMERGE_VALUES with a non-vector argument");

  SPIRVType *ScalarType =
      GR.getSPIRVTypeForVReg(DefType->getOperand(1).getReg());
  MachineBasicBlock &BB = *I.getParent();
  bool Res = true;
  for (unsigned i = 0; i < I.getNumDefs(); ++i) {
    Register ResVReg = I.getOperand(i).getReg();
    SPIRVType *ResType = GR.getSPIRVTypeForVReg(ResVReg);
    if (!ResType) {
      // There were no "assign type" actions; fix this now.
      ResType = ScalarType;
      MRI->setRegClass(ResVReg, GR.getRegClass(ResType));
      MRI->setType(ResVReg, LLT::scalar(GR.getScalarOrVectorBitWidth(ResType)));
      GR.assignSPIRVTypeToVReg(ResType, ResVReg, *GR.CurMF);
    }
    auto MIB =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addUse(SrcReg)
            .addImm(static_cast<int64_t>(i));
    Res &= MIB.constrainAllUses(TII, TRI, RBI);
  }
  return Res;
}

bool SPIRVInstructionSelector::selectFence(MachineInstr &I) const {
  AtomicOrdering AO = AtomicOrdering(I.getOperand(0).getImm());
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem, I);
  SyncScope::ID Ord = SyncScope::ID(I.getOperand(1).getImm());
  uint32_t Scope = static_cast<uint32_t>(
      getMemScope(GR.CurMF->getFunction().getContext(), Ord));
  Register ScopeReg = buildI32Constant(Scope, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpMemoryBarrier))
      .addUse(ScopeReg)
      .addUse(MemSemReg)
      .constrainAllUses(TII, TRI, RBI);
}

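// Lowers G_UADDO/G_USUBO/G_UMULO/G_SMULO to the SPIR-V instruction returning
// a two-member struct (result, carry/overflow bits), extracts both members,
// and converts the second one to the expected boolean via OpINotEqual against
// zero.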
bool SPIRVInstructionSelector::selectOverflowArith(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I,
                                                   unsigned Opcode) const {
  Type *ResTy = nullptr;
  StringRef ResName;
  if (!GR.findValueAttrs(&I, ResTy, ResName))
    report_fatal_error(
        "Not enough info to select the arithmetic with overflow instruction");
  if (!ResTy || !ResTy->isStructTy())
    report_fatal_error("Expect struct type result for the arithmetic "
                       "with overflow instruction");
  // "Result Type must be from OpTypeStruct. The struct must have two members,
  // and the two members must be the same type."
  Type *ResElemTy = cast<StructType>(ResTy)->getElementType(0);
  ResTy = StructType::create(SmallVector<Type *, 2>{ResElemTy, ResElemTy});
  // Build SPIR-V types and constant(s) if needed.
  MachineIRBuilder MIRBuilder(I);
  SPIRVType *StructType = GR.getOrCreateSPIRVType(
      ResTy, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, false);
  assert(I.getNumDefs() > 1 && "Not enough operands");
  SPIRVType *BoolType = GR.getOrCreateSPIRVBoolType(I, TII);
  unsigned N = GR.getScalarOrVectorComponentCount(ResType);
  if (N > 1)
    BoolType = GR.getOrCreateSPIRVVectorType(BoolType, N, I, TII);
  Register BoolTypeReg = GR.getSPIRVTypeID(BoolType);
  Register ZeroReg = buildZerosVal(ResType, I);
  // A new virtual register to store the result struct.
  Register StructVReg = MRI->createGenericVirtualRegister(LLT::scalar(64));
  MRI->setRegClass(StructVReg, &SPIRV::IDRegClass);
  // Build the result name if needed.
  if (!ResName.empty())
    buildOpName(StructVReg, ResName, MIRBuilder);
  // Build the arithmetic with overflow instruction.
  MachineBasicBlock &BB = *I.getParent();
  auto MIB =
      BuildMI(BB, MIRBuilder.getInsertPt(), I.getDebugLoc(), TII.get(Opcode))
          .addDef(StructVReg)
          .addUse(GR.getSPIRVTypeID(StructType));
  for (unsigned i = I.getNumDefs(); i < I.getNumOperands(); ++i)
    MIB.addUse(I.getOperand(i).getReg());
  bool Status = MIB.constrainAllUses(TII, TRI, RBI);
  // Build instructions to extract fields of the instruction's result.
  // A new virtual register to store the higher part of the result struct.
  Register HigherVReg = MRI->createGenericVirtualRegister(LLT::scalar(64));
  MRI->setRegClass(HigherVReg, &SPIRV::iIDRegClass);
  for (unsigned i = 0; i < I.getNumDefs(); ++i) {
    auto MIB =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
            .addDef(i == 1 ? HigherVReg : I.getOperand(i).getReg())
            .addUse(GR.getSPIRVTypeID(ResType))
            .addUse(StructVReg)
            .addImm(i);
    Status &= MIB.constrainAllUses(TII, TRI, RBI);
  }
  // Build boolean value from the higher part.
  Status &= BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual))
                .addDef(I.getOperand(1).getReg())
                .addUse(BoolTypeReg)
                .addUse(HigherVReg)
                .addUse(ZeroReg)
                .constrainAllUses(TII, TRI, RBI);
  return Status;
}

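// G_ATOMIC_CMPXCHG produces a {value, success} pair: the original value comes
// from OpAtomicCompareExchange, the success flag from comparing it with the
// expected value via OpIEqual, and the two are packed into the result struct
// with a pair of OpCompositeInsert instructions.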
bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  Register ScopeReg;
  Register MemSemEqReg;
  Register MemSemNeqReg;
  Register Ptr = I.getOperand(2).getReg();
  if (!isa<GIntrinsic>(I)) {
    assert(I.hasOneMemOperand());
    const MachineMemOperand *MemOp = *I.memoperands_begin();
    unsigned Scope = static_cast<uint32_t>(getMemScope(
        GR.CurMF->getFunction().getContext(), MemOp->getSyncScopeID()));
    ScopeReg = buildI32Constant(Scope, I);

    unsigned ScSem = static_cast<uint32_t>(
        getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr)));
    AtomicOrdering AO = MemOp->getSuccessOrdering();
    unsigned MemSemEq = static_cast<uint32_t>(getMemSemantics(AO)) | ScSem;
    MemSemEqReg = buildI32Constant(MemSemEq, I);
    AtomicOrdering FO = MemOp->getFailureOrdering();
    unsigned MemSemNeq = static_cast<uint32_t>(getMemSemantics(FO)) | ScSem;
    MemSemNeqReg =
        MemSemEq == MemSemNeq ? MemSemEqReg : buildI32Constant(MemSemNeq, I);
  } else {
    ScopeReg = I.getOperand(5).getReg();
    MemSemEqReg = I.getOperand(6).getReg();
    MemSemNeqReg = I.getOperand(7).getReg();
  }

  Register Cmp = I.getOperand(3).getReg();
  Register Val = I.getOperand(4).getReg();
  SPIRVType *SpvValTy = GR.getSPIRVTypeForVReg(Val);
  Register ACmpRes = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
  const DebugLoc &DL = I.getDebugLoc();
  bool Result =
      BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpAtomicCompareExchange))
          .addDef(ACmpRes)
          .addUse(GR.getSPIRVTypeID(SpvValTy))
          .addUse(Ptr)
          .addUse(ScopeReg)
          .addUse(MemSemEqReg)
          .addUse(MemSemNeqReg)
          .addUse(Val)
          .addUse(Cmp)
          .constrainAllUses(TII, TRI, RBI);
  Register CmpSuccReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
  SPIRVType *BoolTy = GR.getOrCreateSPIRVBoolType(I, TII);
  Result &= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpIEqual))
                .addDef(CmpSuccReg)
                .addUse(GR.getSPIRVTypeID(BoolTy))
                .addUse(ACmpRes)
                .addUse(Cmp)
                .constrainAllUses(TII, TRI, RBI);
  Register TmpReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
  Result &= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                .addDef(TmpReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(ACmpRes)
                .addUse(GR.getOrCreateUndef(I, ResType, TII))
                .addImm(0)
                .constrainAllUses(TII, TRI, RBI);
  Result &= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(CmpSuccReg)
                .addUse(TmpReg)
                .addImm(1)
                .constrainAllUses(TII, TRI, RBI);
  return Result;
}

static bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::Workgroup:
  case SPIRV::StorageClass::CrossWorkgroup:
  case SPIRV::StorageClass::Function:
    return true;
  default:
    return false;
  }
}

static bool isUSMStorageClass(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::DeviceOnlyINTEL:
  case SPIRV::StorageClass::HostOnlyINTEL:
    return true;
  default:
    return false;
  }
}

// Returns true if ResVReg is referred to only from global vars and OpName's.
static bool isASCastInGVar(MachineRegisterInfo *MRI, Register ResVReg) {
  bool IsGRef = false;
  bool IsAllowedRefs =
      std::all_of(MRI->use_instr_begin(ResVReg), MRI->use_instr_end(),
                  [&IsGRef](auto const &It) {
                    unsigned Opcode = It.getOpcode();
                    if (Opcode == SPIRV::OpConstantComposite ||
                        Opcode == SPIRV::OpVariable ||
                        isSpvIntrinsic(It, Intrinsic::spv_init_global))
                      return IsGRef = true;
                    return Opcode == SPIRV::OpName;
                  });
  return IsAllowedRefs && IsGRef;
}

Register SPIRVInstructionSelector::getUcharPtrTypeReg(
    MachineInstr &I, SPIRV::StorageClass::StorageClass SC) const {
  return GR.getSPIRVTypeID(GR.getOrCreateSPIRVPointerType(
      GR.getOrCreateSPIRVIntegerType(8, I, TII), I, TII, SC));
}

MachineInstrBuilder
SPIRVInstructionSelector::buildSpecConstantOp(MachineInstr &I, Register Dest,
                                              Register Src, Register DestType,
                                              uint32_t Opcode) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                 TII.get(SPIRV::OpSpecConstantOp))
      .addDef(Dest)
      .addUse(DestType)
      .addImm(Opcode)
      .addUse(Src);
}

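// Builds an OpSpecConstantOp with the PtrCastToGeneric opcode that yields a
// constant Generic-storage-class pointer to the same pointee as SrcPtr.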
MachineInstrBuilder
SPIRVInstructionSelector::buildConstGenericPtr(MachineInstr &I, Register SrcPtr,
                                               SPIRVType *SrcPtrTy) const {
  SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
      GR.getPointeeType(SrcPtrTy), I, TII, SPIRV::StorageClass::Generic);
  Register Tmp = MRI->createVirtualRegister(&SPIRV::pIDRegClass);
  MRI->setType(Tmp, LLT::pointer(storageClassToAddressSpace(
                                     SPIRV::StorageClass::Generic),
                                 GR.getPointerSize()));
  MachineFunction *MF = I.getParent()->getParent();
  GR.assignSPIRVTypeToVReg(GenericPtrTy, Tmp, *MF);
  MachineInstrBuilder MIB = buildSpecConstantOp(
      I, Tmp, SrcPtr, GR.getSPIRVTypeID(GenericPtrTy),
      static_cast<uint32_t>(SPIRV::Opcode::PtrCastToGeneric));
  GR.add(MIB.getInstr(), MF, Tmp);
  return MIB;
}

// In SPIR-V address space casting can only happen to and from the Generic
// storage class. We can also only cast Workgroup, CrossWorkgroup, or Function
// pointers to and from Generic pointers. As such, we can convert e.g. from
// Workgroup to Function by going via a Generic pointer as an intermediary. All
// other combinations can only be done by a bitcast, and are probably not safe.
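// E.g. a Workgroup-to-Function cast is emitted, roughly, as:
//   %tmp = OpPtrCastToGeneric %generic_ptr_ty %src
//   %res = OpGenericCastToPtr %function_ptr_ty %tmp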
bool SPIRVInstructionSelector::selectAddrSpaceCast(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  const DebugLoc &DL = I.getDebugLoc();

  Register SrcPtr = I.getOperand(1).getReg();
  SPIRVType *SrcPtrTy = GR.getSPIRVTypeForVReg(SrcPtr);

  // don't generate a cast for a null that may be represented by OpTypeInt
  if (SrcPtrTy->getOpcode() != SPIRV::OpTypePointer ||
      ResType->getOpcode() != SPIRV::OpTypePointer)
    return BuildMI(BB, I, DL, TII.get(TargetOpcode::COPY))
        .addDef(ResVReg)
        .addUse(SrcPtr)
        .constrainAllUses(TII, TRI, RBI);

  SPIRV::StorageClass::StorageClass SrcSC = GR.getPointerStorageClass(SrcPtrTy);
  SPIRV::StorageClass::StorageClass DstSC = GR.getPointerStorageClass(ResType);

  if (isASCastInGVar(MRI, ResVReg)) {
    // AddrSpaceCast uses within OpVariable and OpConstantComposite instructions
    // are expressed by OpSpecConstantOp with an Opcode.
    // TODO: maybe insert a check whether the Kernel capability was declared and
    // so PtrCastToGeneric/GenericCastToPtr are available.
    unsigned SpecOpcode =
        DstSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(SrcSC)
            ? static_cast<uint32_t>(SPIRV::Opcode::PtrCastToGeneric)
            : (SrcSC == SPIRV::StorageClass::Generic &&
                       isGenericCastablePtr(DstSC)
                   ? static_cast<uint32_t>(SPIRV::Opcode::GenericCastToPtr)
                   : 0);
    // TODO: OpConstantComposite expects i8*, so we are forced to forget a
    // correct value of ResType and use general i8* instead. Maybe this should
    // be addressed in the emit-intrinsic step to infer a correct
    // OpConstantComposite type.
    if (SpecOpcode) {
      return buildSpecConstantOp(I, ResVReg, SrcPtr,
                                 getUcharPtrTypeReg(I, DstSC), SpecOpcode)
          .constrainAllUses(TII, TRI, RBI);
    } else if (isGenericCastablePtr(SrcSC) && isGenericCastablePtr(DstSC)) {
      MachineInstrBuilder MIB = buildConstGenericPtr(I, SrcPtr, SrcPtrTy);
      return MIB.constrainAllUses(TII, TRI, RBI) &&
             buildSpecConstantOp(
                 I, ResVReg, MIB->getOperand(0).getReg(),
                 getUcharPtrTypeReg(I, DstSC),
                 static_cast<uint32_t>(SPIRV::Opcode::GenericCastToPtr))
                 .constrainAllUses(TII, TRI, RBI);
    }
  }

  // Don't generate a cast between identical storage classes.
  if (SrcSC == DstSC)
    return BuildMI(BB, I, DL, TII.get(TargetOpcode::COPY))
        .addDef(ResVReg)
        .addUse(SrcPtr)
        .constrainAllUses(TII, TRI, RBI);

  // Casting from an eligible pointer to Generic.
  if (DstSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(SrcSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
  // Casting from Generic to an eligible pointer.
  if (SrcSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(DstSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);
  // Casting between 2 eligible pointers using Generic as an intermediary.
  if (isGenericCastablePtr(SrcSC) && isGenericCastablePtr(DstSC)) {
    Register Tmp = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
    SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
        GR.getPointeeType(SrcPtrTy), I, TII, SPIRV::StorageClass::Generic);
    bool Success = BuildMI(BB, I, DL, TII.get(SPIRV::OpPtrCastToGeneric))
                       .addDef(Tmp)
                       .addUse(GR.getSPIRVTypeID(GenericPtrTy))
                       .addUse(SrcPtr)
                       .constrainAllUses(TII, TRI, RBI);
    return Success && BuildMI(BB, I, DL, TII.get(SPIRV::OpGenericCastToPtr))
                          .addDef(ResVReg)
                          .addUse(GR.getSPIRVTypeID(ResType))
                          .addUse(Tmp)
                          .constrainAllUses(TII, TRI, RBI);
  }

  // Check if instructions from the SPV_INTEL_usm_storage_classes extension may
  // be applied
  if (isUSMStorageClass(SrcSC) && DstSC == SPIRV::StorageClass::CrossWorkgroup)
    return selectUnOp(ResVReg, ResType, I,
                      SPIRV::OpPtrCastToCrossWorkgroupINTEL);
  if (SrcSC == SPIRV::StorageClass::CrossWorkgroup && isUSMStorageClass(DstSC))
    return selectUnOp(ResVReg, ResType, I,
                      SPIRV::OpCrossWorkgroupCastToPtrINTEL);
  if (isUSMStorageClass(SrcSC) && DstSC == SPIRV::StorageClass::Generic)
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
  if (SrcSC == SPIRV::StorageClass::Generic && isUSMStorageClass(DstSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);

  // A bitcast between pointers requires that the address spaces match.
  return false;
}

static unsigned getFCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::FCMP_OEQ:
    return SPIRV::OpFOrdEqual;
  case CmpInst::FCMP_OGE:
    return SPIRV::OpFOrdGreaterThanEqual;
  case CmpInst::FCMP_OGT:
    return SPIRV::OpFOrdGreaterThan;
  case CmpInst::FCMP_OLE:
    return SPIRV::OpFOrdLessThanEqual;
  case CmpInst::FCMP_OLT:
    return SPIRV::OpFOrdLessThan;
  case CmpInst::FCMP_ONE:
    return SPIRV::OpFOrdNotEqual;
  case CmpInst::FCMP_ORD:
    return SPIRV::OpOrdered;
  case CmpInst::FCMP_UEQ:
    return SPIRV::OpFUnordEqual;
  case CmpInst::FCMP_UGE:
    return SPIRV::OpFUnordGreaterThanEqual;
  case CmpInst::FCMP_UGT:
    return SPIRV::OpFUnordGreaterThan;
  case CmpInst::FCMP_ULE:
    return SPIRV::OpFUnordLessThanEqual;
  case CmpInst::FCMP_ULT:
    return SPIRV::OpFUnordLessThan;
  case CmpInst::FCMP_UNE:
    return SPIRV::OpFUnordNotEqual;
  case CmpInst::FCMP_UNO:
    return SPIRV::OpUnordered;
  default:
    llvm_unreachable("Unknown predicate type for FCmp");
  }
}

static unsigned getICmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpIEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpINotEqual;
  case CmpInst::ICMP_SGE:
    return SPIRV::OpSGreaterThanEqual;
  case CmpInst::ICMP_SGT:
    return SPIRV::OpSGreaterThan;
  case CmpInst::ICMP_SLE:
    return SPIRV::OpSLessThanEqual;
  case CmpInst::ICMP_SLT:
    return SPIRV::OpSLessThan;
  case CmpInst::ICMP_UGE:
    return SPIRV::OpUGreaterThanEqual;
  case CmpInst::ICMP_UGT:
    return SPIRV::OpUGreaterThan;
  case CmpInst::ICMP_ULE:
    return SPIRV::OpULessThanEqual;
  case CmpInst::ICMP_ULT:
    return SPIRV::OpULessThan;
  default:
    llvm_unreachable("Unknown predicate type for ICmp");
  }
}

static unsigned getPtrCmpOpcode(unsigned Pred) {
  switch (static_cast<CmpInst::Predicate>(Pred)) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpPtrEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpPtrNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for pointer comparison");
  }
}

// Return the logical operation, or abort if none exists.
static unsigned getBoolCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpLogicalEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpLogicalNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for Bool comparison");
  }
}

static APFloat getZeroFP(const Type *LLVMFloatTy) {
  if (!LLVMFloatTy)
    return APFloat::getZero(APFloat::IEEEsingle());
  switch (LLVMFloatTy->getScalarType()->getTypeID()) {
  case Type::HalfTyID:
    return APFloat::getZero(APFloat::IEEEhalf());
  default:
  case Type::FloatTyID:
    return APFloat::getZero(APFloat::IEEEsingle());
  case Type::DoubleTyID:
    return APFloat::getZero(APFloat::IEEEdouble());
  }
}

static APFloat getOneFP(const Type *LLVMFloatTy) {
  if (!LLVMFloatTy)
    return APFloat::getOne(APFloat::IEEEsingle());
  switch (LLVMFloatTy->getScalarType()->getTypeID()) {
  case Type::HalfTyID:
    return APFloat::getOne(APFloat::IEEEhalf());
  default:
  case Type::FloatTyID:
    return APFloat::getOne(APFloat::IEEEsingle());
  case Type::DoubleTyID:
    return APFloat::getOne(APFloat::IEEEdouble());
  }
}

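// Lower the spv_any/spv_all intrinsics: a scalar bool input is simply copied;
// any other input is first compared against zero (OpFOrdNotEqual for floats,
// OpINotEqual for integers) to obtain bools, and a vector of bools is then
// reduced to a single bool with OpAny/OpAll.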
bool SPIRVInstructionSelector::selectAnyOrAll(Register ResVReg,
                                              const SPIRVType *ResType,
                                              MachineInstr &I,
                                              unsigned OpAnyOrAll) const {
  assert(I.getNumOperands() == 3);
  assert(I.getOperand(2).isReg());
  MachineBasicBlock &BB = *I.getParent();
  Register InputRegister = I.getOperand(2).getReg();
  SPIRVType *InputType = GR.getSPIRVTypeForVReg(InputRegister);

  if (!InputType)
    report_fatal_error("Input Type could not be determined.");

  bool IsBoolTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeBool);
  bool IsVectorTy = InputType->getOpcode() == SPIRV::OpTypeVector;
  if (IsBoolTy && !IsVectorTy) {
    assert(ResVReg == I.getOperand(0).getReg());
    return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                   TII.get(TargetOpcode::COPY))
        .addDef(ResVReg)
        .addUse(InputRegister)
        .constrainAllUses(TII, TRI, RBI);
  }

  bool IsFloatTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeFloat);
  unsigned SpirvNotEqualId =
      IsFloatTy ? SPIRV::OpFOrdNotEqual : SPIRV::OpINotEqual;
  SPIRVType *SpvBoolScalarTy = GR.getOrCreateSPIRVBoolType(I, TII);
  SPIRVType *SpvBoolTy = SpvBoolScalarTy;
  Register NotEqualReg = ResVReg;

  if (IsVectorTy) {
    NotEqualReg = IsBoolTy ? InputRegister
                           : MRI->createVirtualRegister(&SPIRV::iIDRegClass);
    const unsigned NumElts = InputType->getOperand(2).getImm();
    SpvBoolTy = GR.getOrCreateSPIRVVectorType(SpvBoolTy, NumElts, I, TII);
  }

  if (!IsBoolTy) {
    Register ConstZeroReg =
        IsFloatTy ? buildZerosValF(InputType, I) : buildZerosVal(InputType, I);

    BuildMI(BB, I, I.getDebugLoc(), TII.get(SpirvNotEqualId))
        .addDef(NotEqualReg)
        .addUse(GR.getSPIRVTypeID(SpvBoolTy))
        .addUse(InputRegister)
        .addUse(ConstZeroReg)
        .constrainAllUses(TII, TRI, RBI);
  }

  if (!IsVectorTy)
    return true;

  return BuildMI(BB, I, I.getDebugLoc(), TII.get(OpAnyOrAll))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(SpvBoolScalarTy))
      .addUse(NotEqualReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectAll(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  return selectAnyOrAll(ResVReg, ResType, I, SPIRV::OpAll);
}

bool SPIRVInstructionSelector::selectAny(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  return selectAnyOrAll(ResVReg, ResType, I, SPIRV::OpAny);
}

// Select the OpDot instruction for the given floating-point dot product.
bool SPIRVInstructionSelector::selectFloatDot(Register ResVReg,
                                              const SPIRVType *ResType,
                                              MachineInstr &I) const {
  assert(I.getNumOperands() == 4);
  assert(I.getOperand(2).isReg());
  assert(I.getOperand(3).isReg());

  [[maybe_unused]] SPIRVType *VecType =
      GR.getSPIRVTypeForVReg(I.getOperand(2).getReg());

  assert(VecType->getOpcode() == SPIRV::OpTypeVector &&
         GR.getScalarOrVectorComponentCount(VecType) > 1 &&
         "dot product requires a vector of at least 2 components");

  [[maybe_unused]] SPIRVType *EltType =
      GR.getSPIRVTypeForVReg(VecType->getOperand(1).getReg());

  assert(EltType->getOpcode() == SPIRV::OpTypeFloat);

  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpDot))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(2).getReg())
      .addUse(I.getOperand(3).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

// Since pre-1.6 SPIR-V has no integer dot product instruction, expand it by
// piecewise multiplying the vectors and adding up the components.
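// For example, for a 2-component vector the expansion is roughly (a sketch;
// the SSA names are illustrative):
//   %mul = OpIMul %v2int %a %b
//   %e0  = OpCompositeExtract %int %mul 0
//   %e1  = OpCompositeExtract %int %mul 1
//   %res = OpIAdd %int %e0 %e1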
bool SPIRVInstructionSelector::selectIntegerDot(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  assert(I.getNumOperands() == 4);
  assert(I.getOperand(2).isReg());
  assert(I.getOperand(3).isReg());
  MachineBasicBlock &BB = *I.getParent();

  // Multiply the vectors, then sum the results
  Register Vec0 = I.getOperand(2).getReg();
  Register Vec1 = I.getOperand(3).getReg();
  Register TmpVec = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  SPIRVType *VecType = GR.getSPIRVTypeForVReg(Vec0);

  bool Result = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIMulV))
                    .addDef(TmpVec)
                    .addUse(GR.getSPIRVTypeID(VecType))
                    .addUse(Vec0)
                    .addUse(Vec1)
                    .constrainAllUses(TII, TRI, RBI);

  assert(VecType->getOpcode() == SPIRV::OpTypeVector &&
         GR.getScalarOrVectorComponentCount(VecType) > 1 &&
         "dot product requires a vector of at least 2 components");

  Register Res = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  Result |= BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
                .addDef(Res)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(TmpVec)
                .addImm(0)
                .constrainAllUses(TII, TRI, RBI);

  for (unsigned i = 1; i < GR.getScalarOrVectorComponentCount(VecType); i++) {
    Register Elt = MRI->createVirtualRegister(&SPIRV::IDRegClass);

    Result |=
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
            .addDef(Elt)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addUse(TmpVec)
            .addImm(i)
            .constrainAllUses(TII, TRI, RBI);

    Register Sum = i < GR.getScalarOrVectorComponentCount(VecType) - 1
                       ? MRI->createVirtualRegister(&SPIRV::IDRegClass)
                       : ResVReg;

    Result |= BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIAddS))
                  .addDef(Sum)
                  .addUse(GR.getSPIRVTypeID(ResType))
                  .addUse(Res)
                  .addUse(Elt)
                  .constrainAllUses(TII, TRI, RBI);
    Res = Sum;
  }

  return Result;
}

/// Transform saturate(x) to clamp(x, 0.0f, 1.0f), as SPIR-V does not have a
/// saturate builtin.
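/// For example (a sketch; the SSA names are illustrative):
///   %res = OpExtInst %ty %glsl_std_450 FClamp %x %zero %one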
bool SPIRVInstructionSelector::selectSaturate(Register ResVReg,
                                              const SPIRVType *ResType,
                                              MachineInstr &I) const {
  assert(I.getNumOperands() == 3);
  assert(I.getOperand(2).isReg());
  MachineBasicBlock &BB = *I.getParent();
  Register VZero = buildZerosValF(ResType, I);
  Register VOne = buildOnesValF(ResType, I);

  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
      .addImm(GL::FClamp)
      .addUse(I.getOperand(2).getReg())
      .addUse(VZero)
      .addUse(VOne)
      .constrainAllUses(TII, TRI, RBI);
}

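// Lower the spv_sign intrinsic via the GLSL extended instruction set: FSign
// for floats, SSign for integers, followed by an OpConvertFToS or OpSConvert
// when the result type differs from the input type.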
bool SPIRVInstructionSelector::selectSign(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  assert(I.getNumOperands() == 3);
  assert(I.getOperand(2).isReg());
  MachineBasicBlock &BB = *I.getParent();
  Register InputRegister = I.getOperand(2).getReg();
  SPIRVType *InputType = GR.getSPIRVTypeForVReg(InputRegister);
  auto &DL = I.getDebugLoc();

  if (!InputType)
    report_fatal_error("Input Type could not be determined.");

  bool IsFloatTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeFloat);

  unsigned SignBitWidth = GR.getScalarOrVectorBitWidth(InputType);
  unsigned ResBitWidth = GR.getScalarOrVectorBitWidth(ResType);

  bool NeedsConversion = IsFloatTy || SignBitWidth != ResBitWidth;

  auto SignOpcode = IsFloatTy ? GL::FSign : GL::SSign;
  Register SignReg = NeedsConversion
                         ? MRI->createVirtualRegister(&SPIRV::IDRegClass)
                         : ResVReg;

  bool Result =
      BuildMI(BB, I, DL, TII.get(SPIRV::OpExtInst))
          .addDef(SignReg)
          .addUse(GR.getSPIRVTypeID(InputType))
          .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
          .addImm(SignOpcode)
          .addUse(InputRegister)
          .constrainAllUses(TII, TRI, RBI);

  if (NeedsConversion) {
    auto ConvertOpcode = IsFloatTy ? SPIRV::OpConvertFToS : SPIRV::OpSConvert;
    Result |= BuildMI(*I.getParent(), I, DL, TII.get(ConvertOpcode))
                  .addDef(ResVReg)
                  .addUse(GR.getSPIRVTypeID(ResType))
                  .addUse(SignReg)
                  .constrainAllUses(TII, TRI, RBI);
  }

  return Result;
}

bool SPIRVInstructionSelector::selectBitreverse(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpBitReverse))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(1).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectFreeze(Register ResVReg,
                                            const SPIRVType *ResType,
                                            MachineInstr &I) const {
  // There is no way to implement `freeze` correctly without support on the
  // SPIR-V standard side, but we may at least handle the simple (static) case
  // when the presence of an undef/poison value is obvious. The main benefit of
  // even this incomplete `freeze` support is that it prevents the translation
  // from crashing due to lack of support in the legalization and instruction
  // selection steps.
  if (!I.getOperand(0).isReg() || !I.getOperand(1).isReg())
    return false;
  Register OpReg = I.getOperand(1).getReg();
  if (MachineInstr *Def = MRI->getVRegDef(OpReg)) {
    Register Reg;
    switch (Def->getOpcode()) {
    case SPIRV::ASSIGN_TYPE:
      if (MachineInstr *AssignToDef =
              MRI->getVRegDef(Def->getOperand(1).getReg())) {
        if (AssignToDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
          Reg = Def->getOperand(2).getReg();
      }
      break;
    case SPIRV::OpUndef:
      Reg = Def->getOperand(1).getReg();
      break;
    }
    unsigned DestOpCode;
    if (Reg.isValid()) {
      DestOpCode = SPIRV::OpConstantNull;
    } else {
      DestOpCode = TargetOpcode::COPY;
      Reg = OpReg;
    }
    return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(DestOpCode))
        .addDef(I.getOperand(0).getReg())
        .addUse(Reg)
        .constrainAllUses(TII, TRI, RBI);
  }
  return false;
}

static unsigned getArrayComponentCount(MachineRegisterInfo *MRI,
                                       const SPIRVType *ResType) {
  Register OpReg = ResType->getOperand(2).getReg();
  SPIRVType *OpDef = MRI->getVRegDef(OpReg);
  if (!OpDef)
    return 0;
  if (OpDef->getOpcode() == SPIRV::ASSIGN_TYPE &&
      OpDef->getOperand(1).isReg()) {
    if (SPIRVType *RefDef = MRI->getVRegDef(OpDef->getOperand(1).getReg()))
      OpDef = RefDef;
  }
  unsigned N = OpDef->getOpcode() == TargetOpcode::G_CONSTANT
                   ? OpDef->getOperand(1).getCImm()->getValue().getZExtValue()
                   : 0;
  return N;
}

// Return true if the instruction defining the register produces a constant,
// looking through ASSIGN_TYPE and recursing into vector constituents.
static bool isConstReg(MachineRegisterInfo *MRI, SPIRVType *OpDef,
                       SmallPtrSet<SPIRVType *, 4> &Visited) {
  if (OpDef->getOpcode() == SPIRV::ASSIGN_TYPE &&
      OpDef->getOperand(1).isReg()) {
    if (SPIRVType *RefDef = MRI->getVRegDef(OpDef->getOperand(1).getReg()))
      OpDef = RefDef;
  }

  if (Visited.contains(OpDef))
    return true;
  Visited.insert(OpDef);

  unsigned Opcode = OpDef->getOpcode();
  switch (Opcode) {
  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_FCONSTANT:
    return true;
  case TargetOpcode::G_INTRINSIC:
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
  case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
    return cast<GIntrinsic>(*OpDef).getIntrinsicID() ==
           Intrinsic::spv_const_composite;
  case TargetOpcode::G_BUILD_VECTOR:
  case TargetOpcode::G_SPLAT_VECTOR: {
    for (unsigned i = OpDef->getNumExplicitDefs(); i < OpDef->getNumOperands();
         i++) {
      SPIRVType *OpNestedDef =
          OpDef->getOperand(i).isReg()
              ? MRI->getVRegDef(OpDef->getOperand(i).getReg())
              : nullptr;
      if (OpNestedDef && !isConstReg(MRI, OpNestedDef, Visited))
        return false;
    }
    return true;
  }
  }
  return false;
}

// Return true if the virtual register represents a constant
static bool isConstReg(MachineRegisterInfo *MRI, Register OpReg) {
  SmallPtrSet<SPIRVType *, 4> Visited;
  if (SPIRVType *OpDef = MRI->getVRegDef(OpReg))
    return isConstReg(MRI, OpDef, Visited);
  return false;
}

bool SPIRVInstructionSelector::selectBuildVector(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  unsigned N = 0;
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    N = GR.getScalarOrVectorComponentCount(ResType);
  else if (ResType->getOpcode() == SPIRV::OpTypeArray)
    N = getArrayComponentCount(MRI, ResType);
  else
    report_fatal_error("Cannot select G_BUILD_VECTOR with a non-vector result");
  if (I.getNumExplicitOperands() - I.getNumExplicitDefs() != N)
    report_fatal_error("G_BUILD_VECTOR and the result type are inconsistent");

  // Check if we may construct a constant vector.
  bool IsConst = true;
  for (unsigned i = I.getNumExplicitDefs();
       i < I.getNumExplicitOperands() && IsConst; ++i)
    if (!isConstReg(MRI, I.getOperand(i).getReg()))
      IsConst = false;

  if (!IsConst && N < 2)
    report_fatal_error(
        "There must be at least two constituent operands in a vector");

  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                     TII.get(IsConst ? SPIRV::OpConstantComposite
                                     : SPIRV::OpCompositeConstruct))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  for (unsigned i = I.getNumExplicitDefs(); i < I.getNumExplicitOperands(); ++i)
    MIB.addUse(I.getOperand(i).getReg());
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectSplatVector(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  unsigned N = 0;
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    N = GR.getScalarOrVectorComponentCount(ResType);
  else if (ResType->getOpcode() == SPIRV::OpTypeArray)
    N = getArrayComponentCount(MRI, ResType);
  else
    report_fatal_error("Cannot select G_SPLAT_VECTOR with a non-vector result");

  unsigned OpIdx = I.getNumExplicitDefs();
  if (!I.getOperand(OpIdx).isReg())
    report_fatal_error("Unexpected argument in G_SPLAT_VECTOR");

  // Check if we may construct a constant vector.
  Register OpReg = I.getOperand(OpIdx).getReg();
  bool IsConst = isConstReg(MRI, OpReg);

  if (!IsConst && N < 2)
    report_fatal_error(
        "There must be at least two constituent operands in a vector");

  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                     TII.get(IsConst ? SPIRV::OpConstantComposite
                                     : SPIRV::OpCompositeConstruct))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  for (unsigned i = 0; i < N; ++i)
    MIB.addUse(OpReg);
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectCmp(Register ResVReg,
                                         const SPIRVType *ResType,
                                         unsigned CmpOpc,
                                         MachineInstr &I) const {
  Register Cmp0 = I.getOperand(2).getReg();
  Register Cmp1 = I.getOperand(3).getReg();
  assert(GR.getSPIRVTypeForVReg(Cmp0)->getOpcode() ==
             GR.getSPIRVTypeForVReg(Cmp1)->getOpcode() &&
         "CMP operands should have the same type");
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CmpOpc))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(Cmp0)
      .addUse(Cmp1)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectICmp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  auto Pred = I.getOperand(1).getPredicate();
  unsigned CmpOpc;

  Register CmpOperand = I.getOperand(2).getReg();
  if (GR.isScalarOfType(CmpOperand, SPIRV::OpTypePointer))
    CmpOpc = getPtrCmpOpcode(Pred);
  else if (GR.isScalarOrVectorOfType(CmpOperand, SPIRV::OpTypeBool))
    CmpOpc = getBoolCmpOpcode(Pred);
  else
    CmpOpc = getICmpOpcode(Pred);
  return selectCmp(ResVReg, ResType, CmpOpc, I);
}

void SPIRVInstructionSelector::renderFImm64(MachineInstrBuilder &MIB,
                                            const MachineInstr &I,
                                            int OpIdx) const {
  assert(I.getOpcode() == TargetOpcode::G_FCONSTANT && OpIdx == -1 &&
         "Expected G_FCONSTANT");
  const ConstantFP *FPImm = I.getOperand(1).getFPImm();
  addNumImm(FPImm->getValueAPF().bitcastToAPInt(), MIB);
}

void SPIRVInstructionSelector::renderImm32(MachineInstrBuilder &MIB,
                                           const MachineInstr &I,
                                           int OpIdx) const {
  assert(I.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  addNumImm(I.getOperand(1).getCImm()->getValue(), MIB);
}

Register
SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &I,
                                           const SPIRVType *ResType) const {
  Type *LLVMTy = IntegerType::get(GR.CurMF->getFunction().getContext(), 32);
  const SPIRVType *SpvI32Ty =
      ResType ? ResType : GR.getOrCreateSPIRVIntegerType(32, I, TII);
  // Find a constant in DT or build a new one.
  auto ConstInt = ConstantInt::get(LLVMTy, Val);
  Register NewReg = GR.find(ConstInt, GR.CurMF);
  if (!NewReg.isValid()) {
    NewReg = MRI->createGenericVirtualRegister(LLT::scalar(64));
    GR.add(ConstInt, GR.CurMF, NewReg);
    MachineInstr *MI;
    MachineBasicBlock &BB = *I.getParent();
    if (Val == 0) {
      MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
               .addDef(NewReg)
               .addUse(GR.getSPIRVTypeID(SpvI32Ty));
    } else {
      MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
               .addDef(NewReg)
               .addUse(GR.getSPIRVTypeID(SpvI32Ty))
               .addImm(APInt(32, Val).getZExtValue());
    }
    constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
  }
  return NewReg;
}

bool SPIRVInstructionSelector::selectFCmp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  unsigned CmpOp = getFCmpOpcode(I.getOperand(1).getPredicate());
  return selectCmp(ResVReg, ResType, CmpOp, I);
}

Register SPIRVInstructionSelector::buildZerosVal(const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  // OpenCL uses nulls for Zero. In HLSL we don't use null constants.
  bool ZeroAsNull = STI.isOpenCLEnv();
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    return GR.getOrCreateConstVector(0UL, I, ResType, TII, ZeroAsNull);
  return GR.getOrCreateConstInt(0, I, ResType, TII, ZeroAsNull);
}

Register SPIRVInstructionSelector::buildZerosValF(const SPIRVType *ResType,
                                                  MachineInstr &I) const {
  // OpenCL uses nulls for Zero. In HLSL we don't use null constants.
  bool ZeroAsNull = STI.isOpenCLEnv();
  APFloat VZero = getZeroFP(GR.getTypeForSPIRVType(ResType));
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    return GR.getOrCreateConstVector(VZero, I, ResType, TII, ZeroAsNull);
  return GR.getOrCreateConstFP(VZero, I, ResType, TII, ZeroAsNull);
}

Register SPIRVInstructionSelector::buildOnesValF(const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  // OpenCL uses nulls for Zero. In HLSL we don't use null constants.
  bool ZeroAsNull = STI.isOpenCLEnv();
  APFloat VOne = getOneFP(GR.getTypeForSPIRVType(ResType));
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    return GR.getOrCreateConstVector(VOne, I, ResType, TII, ZeroAsNull);
  return GR.getOrCreateConstFP(VOne, I, ResType, TII, ZeroAsNull);
}

Register SPIRVInstructionSelector::buildOnesVal(bool AllOnes,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
  APInt One =
      AllOnes ? APInt::getAllOnes(BitWidth) : APInt::getOneBitSet(BitWidth, 0);
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    return GR.getOrCreateConstVector(One.getZExtValue(), I, ResType, TII);
  return GR.getOrCreateConstInt(One.getZExtValue(), I, ResType, TII);
}

bool SPIRVInstructionSelector::selectSelect(Register ResVReg,
                                            const SPIRVType *ResType,
                                            MachineInstr &I,
                                            bool IsSigned) const {
  // To extend a bool, we need to use OpSelect between constants.
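  // A signed extension of a scalar bool is then roughly (a sketch; the SSA
  // names are illustrative):
  //   %res = OpSelect %int %cond %all_ones %zero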
  Register ZeroReg = buildZerosVal(ResType, I);
  Register OneReg = buildOnesVal(IsSigned, ResType, I);
  bool IsScalarBool =
      GR.isScalarOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool);
  unsigned Opcode =
      IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectSIVCond;
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(1).getReg())
      .addUse(OneReg)
      .addUse(ZeroReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectIToF(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I, bool IsSigned,
                                          unsigned Opcode) const {
  Register SrcReg = I.getOperand(1).getReg();
  // We can convert a bool value directly to a float type without
  // OpConvert*ToF; however, the translator generates OpSelect+OpConvert*ToF,
  // so we do the same.
  if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool)) {
    unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
    SPIRVType *TmpType = GR.getOrCreateSPIRVIntegerType(BitWidth, I, TII);
    if (ResType->getOpcode() == SPIRV::OpTypeVector) {
      const unsigned NumElts = ResType->getOperand(2).getImm();
      TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts, I, TII);
    }
    SrcReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
    selectSelect(SrcReg, TmpType, I, false);
  }
  return selectUnOpWithSrc(ResVReg, ResType, I, SrcReg, Opcode);
}

bool SPIRVInstructionSelector::selectExt(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I, bool IsSigned) const {
  Register SrcReg = I.getOperand(1).getReg();
  if (GR.isScalarOrVectorOfType(SrcReg, SPIRV::OpTypeBool))
    return selectSelect(ResVReg, ResType, I, IsSigned);

  SPIRVType *SrcType = GR.getSPIRVTypeForVReg(SrcReg);
  if (SrcType == ResType) {
    const TargetRegisterClass *DstRC = MRI->getRegClassOrNull(ResVReg);
    const TargetRegisterClass *SrcRC = MRI->getRegClassOrNull(SrcReg);
    if (DstRC != SrcRC && SrcRC)
      MRI->setRegClass(ResVReg, SrcRC);
    return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                   TII.get(TargetOpcode::COPY))
        .addDef(ResVReg)
        .addUse(SrcReg)
        .constrainAllUses(TII, TRI, RBI);
  }

  unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
  return selectUnOp(ResVReg, ResType, I, Opcode);
}

bool SPIRVInstructionSelector::selectIntToBool(Register IntReg,
                                               Register ResVReg,
                                               MachineInstr &I,
                                               const SPIRVType *IntTy,
                                               const SPIRVType *BoolTy) const {
  // To truncate to a bool, we use OpBitwiseAnd 1 and OpINotEqual to zero.
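  // That is, roughly (a sketch; the SSA names are illustrative):
  //   %bit = OpBitwiseAnd %int %x %one
  //   %res = OpINotEqual %bool %bit %zero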
  Register BitIntReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
  bool IsVectorTy = IntTy->getOpcode() == SPIRV::OpTypeVector;
  unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS;
  Register Zero = buildZerosVal(IntTy, I);
  Register One = buildOnesVal(false, IntTy, I);
  MachineBasicBlock &BB = *I.getParent();
  BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(BitIntReg)
      .addUse(GR.getSPIRVTypeID(IntTy))
      .addUse(IntReg)
      .addUse(One)
      .constrainAllUses(TII, TRI, RBI);
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(BoolTy))
      .addUse(BitIntReg)
      .addUse(Zero)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectTrunc(Register ResVReg,
                                           const SPIRVType *ResType,
                                           MachineInstr &I) const {
  Register IntReg = I.getOperand(1).getReg();
  const SPIRVType *ArgType = GR.getSPIRVTypeForVReg(IntReg);
  if (GR.isScalarOrVectorOfType(ResVReg, SPIRV::OpTypeBool))
    return selectIntToBool(IntReg, ResVReg, I, ArgType, ResType);
  if (ArgType == ResType) {
    const TargetRegisterClass *DstRC = MRI->getRegClassOrNull(ResVReg);
    const TargetRegisterClass *SrcRC = MRI->getRegClassOrNull(IntReg);
    if (DstRC != SrcRC && SrcRC)
      MRI->setRegClass(ResVReg, SrcRC);
    return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                   TII.get(TargetOpcode::COPY))
        .addDef(ResVReg)
        .addUse(IntReg)
        .constrainAllUses(TII, TRI, RBI);
  }
  bool IsSigned = GR.isScalarOrVectorSigned(ResType);
  unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
  return selectUnOp(ResVReg, ResType, I, Opcode);
}

bool SPIRVInstructionSelector::selectConst(Register ResVReg,
                                           const SPIRVType *ResType,
                                           const APInt &Imm,
                                           MachineInstr &I) const {
  unsigned TyOpcode = ResType->getOpcode();
  assert(TyOpcode != SPIRV::OpTypePointer || Imm.isZero());
  MachineBasicBlock &BB = *I.getParent();
  if ((TyOpcode == SPIRV::OpTypePointer || TyOpcode == SPIRV::OpTypeEvent) &&
      Imm.isZero())
    return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
        .addDef(ResVReg)
        .addUse(GR.getSPIRVTypeID(ResType))
        .constrainAllUses(TII, TRI, RBI);
  if (TyOpcode == SPIRV::OpTypeInt) {
    assert(Imm.getBitWidth() <= 64 && "Unsupported integer width!");
    Register Reg = GR.getOrCreateConstInt(Imm.getZExtValue(), I, ResType, TII);
    if (Reg == ResVReg)
      return true;
    return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
        .addDef(ResVReg)
        .addUse(Reg)
        .constrainAllUses(TII, TRI, RBI);
  }
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  // <=32-bit integers should be caught by the sdag pattern.
  assert(Imm.getBitWidth() > 32);
  addNumImm(Imm, MIB);
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectOpUndef(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .constrainAllUses(TII, TRI, RBI);
}

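// Return true if the register operand is defined by an integer constant:
// either a G_CONSTANT reachable through an ASSIGN_TYPE, or an already
// selected OpConstantI.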
static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
  assert(MO.isReg());
  const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
  if (TypeInst->getOpcode() == SPIRV::ASSIGN_TYPE) {
    assert(TypeInst->getOperand(1).isReg());
    MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
    return ImmInst->getOpcode() == TargetOpcode::G_CONSTANT;
  }
  return TypeInst->getOpcode() == SPIRV::OpConstantI;
}

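// Extract the immediate value from the instruction defining the operand's
// register (a G_CONSTANT behind an ASSIGN_TYPE, or an OpConstantI).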
static int64_t foldImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
  const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
  if (TypeInst->getOpcode() == SPIRV::OpConstantI)
    return TypeInst->getOperand(2).getImm();
  MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
  assert(ImmInst->getOpcode() == TargetOpcode::G_CONSTANT);
  return ImmInst->getOperand(1).getCImm()->getZExtValue();
}

bool SPIRVInstructionSelector::selectInsertVal(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeInsert))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 // Object to insert.
                 .addUse(I.getOperand(3).getReg())
                 // Composite to insert into.
                 .addUse(I.getOperand(2).getReg());
  for (unsigned i = 4; i < I.getNumOperands(); i++)
    MIB.addImm(foldImm(I.getOperand(i), MRI));
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectExtractVal(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(I.getOperand(2).getReg());
  for (unsigned i = 3; i < I.getNumOperands(); i++)
    MIB.addImm(foldImm(I.getOperand(i), MRI));
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectInsertElt(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  if (isImm(I.getOperand(4), MRI))
    return selectInsertVal(ResVReg, ResType, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorInsertDynamic))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(2).getReg())
      .addUse(I.getOperand(3).getReg())
      .addUse(I.getOperand(4).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectExtractElt(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  if (isImm(I.getOperand(3), MRI))
    return selectExtractVal(ResVReg, ResType, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorExtractDynamic))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(2).getReg())
      .addUse(I.getOperand(3).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectGEP(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  const bool IsGEPInBounds = I.getOperand(2).getImm();

  // OpAccessChain could be used for OpenCL, but the SPIRV-LLVM Translator
  // relies only on PtrAccessChain, so we try not to deviate. For Vulkan,
  // however, we have to use Op[InBounds]AccessChain.
  const unsigned Opcode = STI.isVulkanEnv()
                              ? (IsGEPInBounds ? SPIRV::OpInBoundsAccessChain
                                               : SPIRV::OpAccessChain)
                              : (IsGEPInBounds ? SPIRV::OpInBoundsPtrAccessChain
                                               : SPIRV::OpPtrAccessChain);

  auto Res = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 // Object to get a pointer to.
                 .addUse(I.getOperand(3).getReg());
  // Adding indices.
  const unsigned StartingIndex =
      (Opcode == SPIRV::OpAccessChain || Opcode == SPIRV::OpInBoundsAccessChain)
          ? 5
          : 4;
  for (unsigned i = StartingIndex; i < I.getNumExplicitOperands(); ++i)
    Res.addUse(I.getOperand(i).getReg());
  return Res.constrainAllUses(TII, TRI, RBI);
}

// Maybe wrap a value into OpSpecConstantOp
bool SPIRVInstructionSelector::wrapIntoSpecConstantOp(
    MachineInstr &I, SmallVector<Register> &CompositeArgs) const {
  bool Result = true;
  unsigned Lim = I.getNumExplicitOperands();
  for (unsigned i = I.getNumExplicitDefs() + 1; i < Lim; ++i) {
    Register OpReg = I.getOperand(i).getReg();
    SPIRVType *OpDefine = MRI->getVRegDef(OpReg);
    SPIRVType *OpType = GR.getSPIRVTypeForVReg(OpReg);
    SmallPtrSet<SPIRVType *, 4> Visited;
    if (!OpDefine || !OpType || isConstReg(MRI, OpDefine, Visited) ||
        OpDefine->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST ||
        GR.isAggregateType(OpType)) {
      // The case of G_ADDRSPACE_CAST inside spv_const_composite() is processed
      // by selectAddrSpaceCast()
      CompositeArgs.push_back(OpReg);
      continue;
    }
    MachineFunction *MF = I.getMF();
    Register WrapReg = GR.find(OpDefine, MF);
    if (WrapReg.isValid()) {
      CompositeArgs.push_back(WrapReg);
      continue;
    }
    // Create a new register for the wrapper
    WrapReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
    GR.add(OpDefine, MF, WrapReg);
    CompositeArgs.push_back(WrapReg);
    // Decorate the wrapper register and generate a new instruction
    MRI->setType(WrapReg, LLT::pointer(0, 64));
    GR.assignSPIRVTypeToVReg(OpType, WrapReg, *MF);
    MachineBasicBlock &BB = *I.getParent();
    Result = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
                 .addDef(WrapReg)
                 .addUse(GR.getSPIRVTypeID(OpType))
                 .addImm(static_cast<uint32_t>(SPIRV::Opcode::Bitcast))
                 .addUse(OpReg)
                 .constrainAllUses(TII, TRI, RBI);
    if (!Result)
      break;
  }
  return Result;
}

bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  Intrinsic::ID IID = cast<GIntrinsic>(I).getIntrinsicID();
  switch (IID) {
  case Intrinsic::spv_load:
    return selectLoad(ResVReg, ResType, I);
  case Intrinsic::spv_store:
    return selectStore(I);
  case Intrinsic::spv_extractv:
    return selectExtractVal(ResVReg, ResType, I);
  case Intrinsic::spv_insertv:
    return selectInsertVal(ResVReg, ResType, I);
  case Intrinsic::spv_extractelt:
    return selectExtractElt(ResVReg, ResType, I);
  case Intrinsic::spv_insertelt:
    return selectInsertElt(ResVReg, ResType, I);
  case Intrinsic::spv_gep:
    return selectGEP(ResVReg, ResType, I);
  case Intrinsic::spv_unref_global:
  case Intrinsic::spv_init_global: {
    MachineInstr *MI = MRI->getVRegDef(I.getOperand(1).getReg());
    MachineInstr *Init = I.getNumExplicitOperands() > 2
                             ? MRI->getVRegDef(I.getOperand(2).getReg())
                             : nullptr;
    assert(MI);
    return selectGlobalValue(MI->getOperand(0).getReg(), *MI, Init);
  }
  case Intrinsic::spv_undef: {
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType));
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_const_composite: {
    // If no values are attached, the composite is a null constant.
    bool IsNull = I.getNumExplicitDefs() + 1 == I.getNumExplicitOperands();
    // Select a proper instruction.
    unsigned Opcode = SPIRV::OpConstantNull;
    SmallVector<Register> CompositeArgs;
    if (!IsNull) {
      Opcode = SPIRV::OpConstantComposite;
      if (!wrapIntoSpecConstantOp(I, CompositeArgs))
        return false;
    }
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType));
    // Skip the type MD node that we already used when generating assign.type
    // for this composite.
    if (!IsNull) {
      for (Register OpReg : CompositeArgs)
        MIB.addUse(OpReg);
    }
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_assign_name: {
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpName));
    MIB.addUse(I.getOperand(I.getNumExplicitDefs() + 1).getReg());
    for (unsigned i = I.getNumExplicitDefs() + 2;
         i < I.getNumExplicitOperands(); ++i) {
      MIB.addImm(I.getOperand(i).getImm());
    }
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_switch: {
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSwitch));
    for (unsigned i = 1; i < I.getNumExplicitOperands(); ++i) {
      if (I.getOperand(i).isReg())
        MIB.addReg(I.getOperand(i).getReg());
      else if (I.getOperand(i).isCImm())
        addNumImm(I.getOperand(i).getCImm()->getValue(), MIB);
      else if (I.getOperand(i).isMBB())
        MIB.addMBB(I.getOperand(i).getMBB());
      else
        llvm_unreachable("Unexpected OpSwitch operand");
    }
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_loop_merge:
  case Intrinsic::spv_selection_merge: {
    const auto Opcode = IID == Intrinsic::spv_selection_merge
                            ? SPIRV::OpSelectionMerge
                            : SPIRV::OpLoopMerge;
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode));
    for (unsigned i = 1; i < I.getNumExplicitOperands(); ++i) {
      assert(I.getOperand(i).isMBB());
      MIB.addMBB(I.getOperand(i).getMBB());
    }
    MIB.addImm(SPIRV::SelectionControl::None);
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_cmpxchg:
    return selectAtomicCmpXchg(ResVReg, ResType, I);
  case Intrinsic::spv_unreachable:
    BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUnreachable));
    break;
  case Intrinsic::spv_alloca:
    return selectFrameIndex(ResVReg, ResType, I);
  case Intrinsic::spv_alloca_array:
    return selectAllocaArray(ResVReg, ResType, I);
  case Intrinsic::spv_assume:
    if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
      BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpAssumeTrueKHR))
          .addUse(I.getOperand(1).getReg());
    break;
  case Intrinsic::spv_expect:
    if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
      BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExpectKHR))
          .addDef(ResVReg)
          .addUse(GR.getSPIRVTypeID(ResType))
          .addUse(I.getOperand(2).getReg())
          .addUse(I.getOperand(3).getReg());
    break;
  case Intrinsic::arithmetic_fence:
    if (STI.canUseExtension(SPIRV::Extension::SPV_EXT_arithmetic_fence))
      BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpArithmeticFenceEXT))
          .addDef(ResVReg)
          .addUse(GR.getSPIRVTypeID(ResType))
          .addUse(I.getOperand(2).getReg());
    else
      BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY), ResVReg)
          .addUse(I.getOperand(2).getReg());
    break;
  case Intrinsic::spv_thread_id:
    return selectSpvThreadId(ResVReg, ResType, I);
  case Intrinsic::spv_fdot:
    return selectFloatDot(ResVReg, ResType, I);
  case Intrinsic::spv_udot:
  case Intrinsic::spv_sdot:
    return selectIntegerDot(ResVReg, ResType, I);
  case Intrinsic::spv_all:
    return selectAll(ResVReg, ResType, I);
  case Intrinsic::spv_any:
    return selectAny(ResVReg, ResType, I);
  case Intrinsic::spv_lerp:
    return selectExtInst(ResVReg, ResType, I, CL::mix, GL::FMix);
  case Intrinsic::spv_length:
    return selectExtInst(ResVReg, ResType, I, CL::length, GL::Length);
  case Intrinsic::spv_frac:
    return selectExtInst(ResVReg, ResType, I, CL::fract, GL::Fract);
  case Intrinsic::spv_normalize:
    return selectExtInst(ResVReg, ResType, I, CL::normalize, GL::Normalize);
  case Intrinsic::spv_rsqrt:
    return selectExtInst(ResVReg, ResType, I, CL::rsqrt, GL::InverseSqrt);
  case Intrinsic::spv_sign:
    return selectSign(ResVReg, ResType, I);
  case Intrinsic::spv_lifetime_start:
  case Intrinsic::spv_lifetime_end: {
    unsigned Op = IID == Intrinsic::spv_lifetime_start ? SPIRV::OpLifetimeStart
                                                       : SPIRV::OpLifetimeStop;
    int64_t Size = I.getOperand(I.getNumExplicitDefs() + 1).getImm();
    Register PtrReg = I.getOperand(I.getNumExplicitDefs() + 2).getReg();
    if (Size == -1)
      Size = 0;
    BuildMI(BB, I, I.getDebugLoc(), TII.get(Op)).addUse(PtrReg).addImm(Size);
  } break;
  case Intrinsic::spv_saturate:
    return selectSaturate(ResVReg, ResType, I);
  case Intrinsic::spv_wave_is_first_lane: {
    SPIRVType *IntTy = GR.getOrCreateSPIRVIntegerType(32, I, TII);
    return BuildMI(BB, I, I.getDebugLoc(),
                   TII.get(SPIRV::OpGroupNonUniformElect))
        .addDef(ResVReg)
        .addUse(GR.getSPIRVTypeID(ResType))
        .addUse(GR.getOrCreateConstInt(3, I, IntTy, TII));
  }
  case Intrinsic::spv_step:
    return selectExtInst(ResVReg, ResType, I, CL::step, GL::Step);
  // Discard intrinsics which we do not expect to actually represent code after
  // lowering or intrinsics which are not implemented but should not crash when
  // found in a customer's LLVM IR input.
  case Intrinsic::instrprof_increment:
  case Intrinsic::instrprof_increment_step:
  case Intrinsic::instrprof_value_profile:
    break;
  // Discard internal intrinsics.
  case Intrinsic::spv_value_md:
    break;
  default: {
    std::string DiagMsg;
    raw_string_ostream OS(DiagMsg);
    I.print(OS);
    DiagMsg = "Intrinsic selection not implemented: " + DiagMsg;
    report_fatal_error(DiagMsg.c_str(), false);
  }
  }
  return true;
}

bool SPIRVInstructionSelector::selectAllocaArray(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  // There was an allocation size parameter to the allocation instruction
  // that is not 1.
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(),
                 TII.get(SPIRV::OpVariableLengthArrayINTEL))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(2).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectFrameIndex(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  // Change order of instructions if needed: all OpVariable instructions in a
  // function must be the first instructions in the first block
  MachineFunction *MF = I.getParent()->getParent();
  MachineBasicBlock *MBB = &MF->front();
  auto It = MBB->SkipPHIsAndLabels(MBB->begin()), E = MBB->end();
  bool IsHeader = false;
  unsigned Opcode;
  for (; It != E && It != I; ++It) {
    Opcode = It->getOpcode();
    if (Opcode == SPIRV::OpFunction || Opcode == SPIRV::OpFunctionParameter) {
      IsHeader = true;
    } else if (IsHeader &&
               !(Opcode == SPIRV::ASSIGN_TYPE || Opcode == SPIRV::OpLabel)) {
      ++It;
      break;
    }
  }
  return BuildMI(*MBB, It, It->getDebugLoc(), TII.get(SPIRV::OpVariable))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addImm(static_cast<uint32_t>(SPIRV::StorageClass::Function))
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectBranch(MachineInstr &I) const {
  // InstructionSelector walks backwards through the instructions. We can use
  // both a G_BR and a G_BRCOND to create an OpBranchConditional. We hit G_BR
  // first, so we can generate an OpBranchConditional here. If there is no
  // G_BRCOND, we just use OpBranch for a regular unconditional branch.
  const MachineInstr *PrevI = I.getPrevNode();
  MachineBasicBlock &MBB = *I.getParent();
  if (PrevI != nullptr && PrevI->getOpcode() == TargetOpcode::G_BRCOND) {
    return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
        .addUse(PrevI->getOperand(0).getReg())
        .addMBB(PrevI->getOperand(1).getMBB())
        .addMBB(I.getOperand(0).getMBB())
        .constrainAllUses(TII, TRI, RBI);
  }
  return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranch))
      .addMBB(I.getOperand(0).getMBB())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectBranchCond(MachineInstr &I) const {
  // InstructionSelector walks backwards through the instructions. For an
  // explicit conditional branch with no fallthrough, we use both a G_BR and a
  // G_BRCOND to create an OpBranchConditional. We should hit G_BR first, and
  // generate the OpBranchConditional in selectBranch above.
  //
  // If an OpBranchConditional has been generated, we simply return, as the
  // work is already done. If there is no OpBranchConditional, LLVM must be
  // relying on
  // implicit fallthrough to the next basic block, so we need to create an
  // OpBranchConditional with an explicit "false" argument pointing to the next
  // basic block that LLVM would fall through to.
  const MachineInstr *NextI = I.getNextNode();
  // Check if this has already been successfully selected.
  if (NextI != nullptr && NextI->getOpcode() == SPIRV::OpBranchConditional)
    return true;
  // Must be relying on implicit block fallthrough, so generate an
  // OpBranchConditional with the "next" basic block as the "false" target.
  MachineBasicBlock &MBB = *I.getParent();
  unsigned NextMBBNum = MBB.getNextNode()->getNumber();
  MachineBasicBlock *NextMBB = I.getMF()->getBlockNumbered(NextMBBNum);
  return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
      .addUse(I.getOperand(0).getReg())
      .addMBB(I.getOperand(1).getMBB())
      .addMBB(NextMBB)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectPhi(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpPhi))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  const unsigned NumOps = I.getNumOperands();
  for (unsigned i = 1; i < NumOps; i += 2) {
    MIB.addUse(I.getOperand(i + 0).getReg());
    MIB.addMBB(I.getOperand(i + 1).getMBB());
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectGlobalValue(
    Register ResVReg, MachineInstr &I, const MachineInstr *Init) const {
  // FIXME: don't use MachineIRBuilder here, replace it with BuildMI.
  MachineIRBuilder MIRBuilder(I);
  const GlobalValue *GV = I.getOperand(1).getGlobal();
  Type *GVType = toTypedPointer(GR.getDeducedGlobalValueType(GV));
  SPIRVType *PointerBaseType;
  if (GVType->isArrayTy()) {
    SPIRVType *ArrayElementType =
        GR.getOrCreateSPIRVType(GVType->getArrayElementType(), MIRBuilder,
                                SPIRV::AccessQualifier::ReadWrite, false);
    PointerBaseType = GR.getOrCreateSPIRVArrayType(
        ArrayElementType, GVType->getArrayNumElements(), I, TII);
  } else {
    PointerBaseType = GR.getOrCreateSPIRVType(
        GVType, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, false);
  }
  SPIRVType *ResType = GR.getOrCreateSPIRVPointerType(
      PointerBaseType, I, TII,
      addressSpaceToStorageClass(GV->getAddressSpace(), STI));

  std::string GlobalIdent;
  if (!GV->hasName()) {
    unsigned &ID = UnnamedGlobalIDs[GV];
    if (ID == 0)
      ID = UnnamedGlobalIDs.size();
    GlobalIdent = "__unnamed_" + Twine(ID).str();
  } else {
    GlobalIdent = GV->getGlobalIdentifier();
  }

  // The behaviour of functions as operands depends on the availability of the
  // corresponding extension (SPV_INTEL_function_pointers):
  // - If the extension to operate with functions as operands is available, we
  // create a proper constant operand and evaluate a correct type for a
  // function pointer.
  // - Without the required extension, we still encounter functions as
  // operands in tests with blocks of instructions, e.g. in
  // transcoding/global_block.ll. These operands are not used and should be
  // substituted by zero constants. Their type is expected to always be
  // OpTypePointer Function %uchar.
  if (isa<Function>(GV)) {
    const Constant *ConstVal = GV;
    MachineBasicBlock &BB = *I.getParent();
    Register NewReg = GR.find(ConstVal, GR.CurMF);
    if (!NewReg.isValid()) {
      Register NewReg = ResVReg;
      GR.add(ConstVal, GR.CurMF, NewReg);
      const Function *GVFun =
          STI.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers)
              ? dyn_cast<Function>(GV)
              : nullptr;
      if (GVFun) {
        // References to a function via function pointers generate virtual
        // registers without a definition. We resolve them later, during the
        // module analysis stage.
        MachineRegisterInfo *MRI = MIRBuilder.getMRI();
        Register FuncVReg = MRI->createGenericVirtualRegister(LLT::scalar(64));
        MRI->setRegClass(FuncVReg, &SPIRV::iIDRegClass);
        MachineInstrBuilder MB =
            BuildMI(BB, I, I.getDebugLoc(),
                    TII.get(SPIRV::OpConstantFunctionPointerINTEL))
                .addDef(NewReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(FuncVReg);
        // Map the function pointer operand to the Function it references.
        GR.recordFunctionPointer(&MB.getInstr()->getOperand(2), GVFun);
        return MB.constrainAllUses(TII, TRI, RBI);
      }
      return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
          .addDef(NewReg)
          .addUse(GR.getSPIRVTypeID(ResType))
          .constrainAllUses(TII, TRI, RBI);
    }
    assert(NewReg != ResVReg);
    return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
        .addDef(ResVReg)
        .addUse(NewReg)
        .constrainAllUses(TII, TRI, RBI);
  }
  auto GlobalVar = cast<GlobalVariable>(GV);
  assert(GlobalVar->getName() != "llvm.global.annotations");

  bool HasInit = GlobalVar->hasInitializer() &&
                 !isa<UndefValue>(GlobalVar->getInitializer());
  // Skip the empty declaration of a GV that has an initializer until we reach
  // the declaration that carries the initializer.
  if (HasInit && !Init)
    return true;

  unsigned AddrSpace = GV->getAddressSpace();
  SPIRV::StorageClass::StorageClass Storage =
      addressSpaceToStorageClass(AddrSpace, STI);
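  // Map LLVM linkage onto SPIR-V linkage: declarations (and
  // available_externally definitions) import the symbol, linkonce_odr maps to
  // LinkOnceODR when SPV_KHR_linkonce_odr is available, and everything else
  // exports it.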
  bool HasLnkTy = GV->getLinkage() != GlobalValue::InternalLinkage &&
                  Storage != SPIRV::StorageClass::Function;
  SPIRV::LinkageType::LinkageType LnkType =
      (GV->isDeclaration() || GV->hasAvailableExternallyLinkage())
          ? SPIRV::LinkageType::Import
          : (GV->getLinkage() == GlobalValue::LinkOnceODRLinkage &&
                     STI.canUseExtension(SPIRV::Extension::SPV_KHR_linkonce_odr)
                 ? SPIRV::LinkageType::LinkOnceODR
                 : SPIRV::LinkageType::Export);

  Register Reg = GR.buildGlobalVariable(ResVReg, ResType, GlobalIdent, GV,
                                        Storage, Init, GlobalVar->isConstant(),
                                        HasLnkTy, LnkType, MIRBuilder, true);
  return Reg.isValid();
}

bool SPIRVInstructionSelector::selectLog10(Register ResVReg,
                                           const SPIRVType *ResType,
                                           MachineInstr &I) const {
  if (STI.canUseExtInstSet(SPIRV::InstructionSet::OpenCL_std)) {
    return selectExtInst(ResVReg, ResType, I, CL::log10);
  }

  // There is no log10 instruction in the GLSL Extended Instruction set, so it
  // is implemented as:
  // log10(x) = log2(x) * (1 / log2(10))
  //          = log2(x) * 0.30103
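  // For a scalar float input the emitted sequence is roughly:
  //   %log2 = OpExtInst %float %glsl_ext Log2 %x
  //   %res  = OpFMul %float %log2 %const_0.30103
  // (for vectors the multiply is an OpVectorTimesScalar instead).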

  MachineIRBuilder MIRBuilder(I);
  MachineBasicBlock &BB = *I.getParent();

  // Build log2(x).
  Register VarReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
  bool Result =
      BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
          .addDef(VarReg)
          .addUse(GR.getSPIRVTypeID(ResType))
          .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
          .addImm(GL::Log2)
          .add(I.getOperand(1))
          .constrainAllUses(TII, TRI, RBI);

  // Build 0.30103.
  assert(ResType->getOpcode() == SPIRV::OpTypeVector ||
         ResType->getOpcode() == SPIRV::OpTypeFloat);
  // TODO: Add matrix implementation once supported by the HLSL frontend.
  const SPIRVType *SpirvScalarType =
      ResType->getOpcode() == SPIRV::OpTypeVector
          ? GR.getSPIRVTypeForVReg(ResType->getOperand(1).getReg())
          : ResType;
  Register ScaleReg =
      GR.buildConstantFP(APFloat(0.30103f), MIRBuilder, SpirvScalarType);

  // Multiply log2(x) by 0.30103 to get log10(x) result.
  auto Opcode = ResType->getOpcode() == SPIRV::OpTypeVector
                    ? SPIRV::OpVectorTimesScalar
                    : SPIRV::OpFMulS;
  Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(VarReg)
                .addUse(ScaleReg)
                .constrainAllUses(TII, TRI, RBI);

  return Result;
}

bool SPIRVInstructionSelector::selectSpvThreadId(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  // DX intrinsic: @llvm.dx.thread.id(i32)
  // ID  Name      Description
  // 93  ThreadId  reads the thread ID
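  // The intrinsic is lowered to a load of the GlobalInvocationId builtin
  // followed by an extract of the requested component, roughly:
  //   %var = OpVariable %_ptr_Input_v3uint Input  ; BuiltIn GlobalInvocationId
  //   %vec = OpLoad %v3uint %var
  //   %res = OpCompositeExtract %uint %vec <idx>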

  MachineIRBuilder MIRBuilder(I);
  const SPIRVType *U32Type = GR.getOrCreateSPIRVIntegerType(32, MIRBuilder);
  const SPIRVType *Vec3Ty =
      GR.getOrCreateSPIRVVectorType(U32Type, 3, MIRBuilder);
  const SPIRVType *PtrType = GR.getOrCreateSPIRVPointerType(
      Vec3Ty, MIRBuilder, SPIRV::StorageClass::Input);

  // Create a new register for the GlobalInvocationId builtin variable.
  Register NewRegister =
      MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::iIDRegClass);
  MIRBuilder.getMRI()->setType(NewRegister, LLT::pointer(0, 64));
  GR.assignSPIRVTypeToVReg(PtrType, NewRegister, MIRBuilder.getMF());

  // Build GlobalInvocationID global variable with the necessary decorations.
  Register Variable = GR.buildGlobalVariable(
      NewRegister, PtrType,
      getLinkStringForBuiltIn(SPIRV::BuiltIn::GlobalInvocationId), nullptr,
      SPIRV::StorageClass::Input, nullptr, true, true,
      SPIRV::LinkageType::Import, MIRBuilder, false);

  // Create a new register for the loaded value.
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  Register LoadedRegister = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
  MRI->setType(LoadedRegister, LLT::pointer(0, 64));
  GR.assignSPIRVTypeToVReg(Vec3Ty, LoadedRegister, MIRBuilder.getMF());

  // Load v3uint value from the global variable.
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
      .addDef(LoadedRegister)
      .addUse(GR.getSPIRVTypeID(Vec3Ty))
      .addUse(Variable);

  // Get the thread ID index. The operand is expected to be a constant
  // immediate value, wrapped in a type assignment.
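  // i.e. we expect to walk back a chain of roughly this shape:
  //   %c = G_CONSTANT i32 <idx>
  //   %thread_id_op = ASSIGN_TYPE %c, %uint_type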
  assert(I.getOperand(2).isReg());
  Register ThreadIdReg = I.getOperand(2).getReg();
  SPIRVType *ConstTy = this->MRI->getVRegDef(ThreadIdReg);
  assert(ConstTy && ConstTy->getOpcode() == SPIRV::ASSIGN_TYPE &&
         ConstTy->getOperand(1).isReg());
  Register ConstReg = ConstTy->getOperand(1).getReg();
  const MachineInstr *Const = this->MRI->getVRegDef(ConstReg);
  assert(Const && Const->getOpcode() == TargetOpcode::G_CONSTANT);
  const llvm::APInt &Val = Const->getOperand(1).getCImm()->getValue();
  const uint32_t ThreadId = Val.getZExtValue();

  // Extract the thread ID from the loaded vector value.
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(LoadedRegister)
                 .addImm(ThreadId);
  return MIB.constrainAllUses(TII, TRI, RBI);
}

namespace llvm {
InstructionSelector *
createSPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                               const SPIRVSubtarget &Subtarget,
                               const RegisterBankInfo &RBI) {
  return new SPIRVInstructionSelector(TM, Subtarget, RBI);
}
} // namespace llvm