llvm/lib/Target/AMDGPU/SIInstrInfo.h

//===- SIInstrInfo.h - SI Instruction Info Interface ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Interface definition for SIInstrInfo.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AMDGPU_SIINSTRINFO_H
#define LLVM_LIB_TARGET_AMDGPU_SIINSTRINFO_H

#include "AMDGPUMIRFormatter.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIRegisterInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"

#define GET_INSTRINFO_HEADER
#include "AMDGPUGenInstrInfo.inc"

namespace llvm {

class APInt;
class GCNSubtarget;
class LiveVariables;
class MachineDominatorTree;
class MachineRegisterInfo;
class RegScavenger;
class TargetRegisterClass;
class ScheduleHazardRecognizer;

/// Mark the MMO of a uniform load if there are no potentially clobbering stores
/// on any path from the start of an entry function to this load.
static const MachineMemOperand::Flags MONoClobber =
    MachineMemOperand::MOTargetFlag1;

/// Mark the MMO of a load as the last use.
static const MachineMemOperand::Flags MOLastUse =
    MachineMemOperand::MOTargetFlag2;
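
// Illustrative sketch (not part of this interface): attaching one of these
// target flags when building a memory operand. MF, PtrInfo, and the analysis
// result IsNoClobber are hypothetical names assumed to be in scope.
//
//   MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad;
//   if (IsNoClobber) // e.g. no clobbering store reaches this uniform load
//     Flags |= MONoClobber;
//   MachineMemOperand *MMO =
//       MF.getMachineMemOperand(PtrInfo, Flags, /*Size=*/4, Align(4));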

/// Utility to store machine instructions worklist.
struct SIInstrWorklist {};

class SIInstrInfo final : public AMDGPUGenInstrInfo {};

/// \brief Returns true if the reg:subreg pair \p P belongs to the register
/// class \p TRC
inline bool isOfRegClass(const TargetInstrInfo::RegSubRegPair &P,
                         const TargetRegisterClass &TRC,
                         MachineRegisterInfo &MRI) {}

/// \brief Create RegSubRegPair from a register MachineOperand
inline
TargetInstrInfo::RegSubRegPair getRegSubRegPair(const MachineOperand &O) {}
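
// Illustrative sketch: combining the two helpers above to test whether a
// register MachineOperand MO (assumed in scope) reads a 32-bit VGPR.
//
//   TargetInstrInfo::RegSubRegPair P = getRegSubRegPair(MO);
//   if (isOfRegClass(P, AMDGPU::VGPR_32RegClass, MRI)) {
//     // ... operand is a 32-bit VGPR ...
//   }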

/// \brief Return the SubReg component from REG_SEQUENCE
TargetInstrInfo::RegSubRegPair getRegSequenceSubReg(MachineInstr &MI,
                                                    unsigned SubReg);
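
// For example (illustrative MIR), given
//   %5:vreg_64 = REG_SEQUENCE %1:vgpr_32, %subreg.sub0, %2:vgpr_32, %subreg.sub1
// getRegSequenceSubReg(MI, AMDGPU::sub0) yields the reg:subreg pair for %1.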

/// \brief Return the defining instruction for a given reg:subreg pair
/// skipping copy like instructions and subreg-manipulation pseudos.
/// Following another subreg of a reg:subreg isn't supported.
MachineInstr *getVRegSubRegDef(const TargetInstrInfo::RegSubRegPair &P,
                               MachineRegisterInfo &MRI);
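
// Illustrative sketch: chasing a pair back to its real producer, looking
// through COPY-like instructions (P and MRI assumed in scope).
//
//   if (MachineInstr *Def = getVRegSubRegDef(P, MRI)) {
//     // Def is the first non-copy-like def of P.Reg:P.SubReg.
//   }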

/// \brief Return false if EXEC is not changed between the def of \p VReg at \p
/// DefMI and the use at \p UseMI. Should be run on SSA. Currently does not
/// attempt to track between blocks.
bool execMayBeModifiedBeforeUse(const MachineRegisterInfo &MRI,
                                Register VReg,
                                const MachineInstr &DefMI,
                                const MachineInstr &UseMI);
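
// Illustrative sketch: a fold that moves a VALU value from its def to a
// later use is only safe if no lanes were switched in between (VReg, DefMI,
// and UseMI assumed in scope).
//
//   if (execMayBeModifiedBeforeUse(MRI, VReg, DefMI, UseMI))
//     return false; // EXEC may differ at the use; do not fold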

/// \brief Return false if EXEC is not changed between the def of \p VReg at \p
/// DefMI and all its uses. Should be run on SSA. Currently does not attempt to
/// track between blocks.
bool execMayBeModifiedBeforeAnyUse(const MachineRegisterInfo &MRI,
                                   Register VReg,
                                   const MachineInstr &DefMI);

namespace AMDGPU {

  LLVM_READONLY
  int getVOPe64(uint16_t Opcode);

  LLVM_READONLY
  int getVOPe32(uint16_t Opcode);
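
  // Illustrative sketch: shrinking an instruction to its 32-bit VOP2
  // encoding when one exists; these mappings return -1 when there is no
  // counterpart (MI assumed in scope).
  //
  //   int Op32 = AMDGPU::getVOPe32(MI.getOpcode());
  //   if (Op32 != -1) {
  //     // ... rewrite MI to the 32-bit encoding ...
  //   }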

  LLVM_READONLY
  int getSDWAOp(uint16_t Opcode);

  LLVM_READONLY
  int getDPPOp32(uint16_t Opcode);

  LLVM_READONLY
  int getDPPOp64(uint16_t Opcode);

  LLVM_READONLY
  int getBasicFromSDWAOp(uint16_t Opcode);

  LLVM_READONLY
  int getCommuteRev(uint16_t Opcode);

  LLVM_READONLY
  int getCommuteOrig(uint16_t Opcode);

  LLVM_READONLY
  int getAddr64Inst(uint16_t Opcode);

  /// Check if \p Opcode is an Addr64 opcode.
  ///
  /// \returns \p Opcode if it is an Addr64 opcode, otherwise -1.
  LLVM_READONLY
  int getIfAddr64Inst(uint16_t Opcode);

  LLVM_READONLY
  int getSOPKOp(uint16_t Opcode);

  /// \returns SADDR form of a FLAT Global instruction given an \p Opcode
  /// of a VADDR form.
  LLVM_READONLY
  int getGlobalSaddrOp(uint16_t Opcode);

  /// \returns VADDR form of a FLAT Global instruction given an \p Opcode
  /// of a SADDR form.
  LLVM_READONLY
  int getGlobalVaddrOp(uint16_t Opcode);
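
  // Illustrative sketch: when the address is known to be uniform, switch a
  // VADDR global access to its SADDR form (MI assumed in scope).
  //
  //   int SaddrOp = AMDGPU::getGlobalSaddrOp(MI.getOpcode());
  //   if (SaddrOp != -1) {
  //     // ... move the address into SGPRs and rewrite MI ...
  //   }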

  LLVM_READONLY
  int getVCMPXNoSDstOp(uint16_t Opcode);

  /// \returns ST form with only immediate offset of a FLAT Scratch instruction
  /// given an \p Opcode of an SS (SADDR) form.
  LLVM_READONLY
  int getFlatScratchInstSTfromSS(uint16_t Opcode);

  /// \returns SV (VADDR) form of a FLAT Scratch instruction given an \p Opcode
  /// of an SVS (SADDR + VADDR) form.
  LLVM_READONLY
  int getFlatScratchInstSVfromSVS(uint16_t Opcode);

  /// \returns SS (SADDR) form of a FLAT Scratch instruction given an \p Opcode
  /// of an SV (VADDR) form.
  LLVM_READONLY
  int getFlatScratchInstSSfromSV(uint16_t Opcode);

  /// \returns SV (VADDR) form of a FLAT Scratch instruction given an \p Opcode
  /// of an SS (SADDR) form.
  LLVM_READONLY
  int getFlatScratchInstSVfromSS(uint16_t Opcode);
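
  // Illustrative example of the addressing forms named above, using the
  // usual AMDGPU opcode names for a scratch load:
  //   SCRATCH_LOAD_DWORD_ST     ST:  immediate offset only
  //   SCRATCH_LOAD_DWORD_SADDR  SS:  SGPR-held address
  //   SCRATCH_LOAD_DWORD        SV:  VGPR-held address
  //   SCRATCH_LOAD_DWORD_SVS    SVS: SGPR + VGPR address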

  /// \returns earlyclobber version of a MAC MFMA if it exists.
  LLVM_READONLY
  int getMFMAEarlyClobberOp(uint16_t Opcode);

  /// \returns v_cmpx version of a v_cmp instruction.
  LLVM_READONLY
  int getVCMPXOpFromVCMP(uint16_t Opcode);

  const uint64_t RSRC_DATA_FORMAT = 0xf00000000000LL;
  const uint64_t RSRC_ELEMENT_SIZE_SHIFT = (32 + 19);
  const uint64_t RSRC_INDEX_STRIDE_SHIFT = (32 + 21);
  const uint64_t RSRC_TID_ENABLE = UINT64_C(1) << (32 + 23);
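
  // Illustrative sketch: these constants describe fields in words 2-3 of a
  // buffer resource descriptor, e.g. when composing a scratch rsrc:
  //
  //   uint64_t Rsrc23 = RSRC_DATA_FORMAT | RSRC_TID_ENABLE;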

} // end namespace AMDGPU

namespace AMDGPU {
enum AsmComments {};
} // namespace AMDGPU

namespace SI {
namespace KernelInputOffsets {

/// Offsets in bytes from the start of the input buffer
enum Offsets {};

} // end namespace KernelInputOffsets
} // end namespace SI

} // end namespace llvm

#endif // LLVM_LIB_TARGET_AMDGPU_SIINSTRINFO_H