// llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp

//=== AArch64PostLegalizerLowering.cpp --------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// Post-legalization lowering for instructions.
///
/// This is used to offload pattern matching from the selector.
///
/// For example, this combiner will notice that a G_SHUFFLE_VECTOR is actually
/// a G_ZIP, G_UZP, etc.
///
/// General optimization combines should be handled by either the
/// AArch64PostLegalizerCombiner or the AArch64PreLegalizerCombiner.
///
//===----------------------------------------------------------------------===//

#include "AArch64ExpandImm.h"
#include "AArch64GlobalISelUtils.h"
#include "AArch64PerfectShuffle.h"
#include "AArch64Subtarget.h"
#include "AArch64TargetMachine.h"
#include "GISel/AArch64LegalizerInfo.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "TargetInfo/AArch64TargetInfo.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/CodeGen/GlobalISel/Combiner.h"
#include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
#include "llvm/CodeGen/GlobalISel/CombinerInfo.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include <optional>

#define GET_GICOMBINER_DEPS
#include "AArch64GenPostLegalizeGILowering.inc"
#undef GET_GICOMBINER_DEPS

#define DEBUG_TYPE "aarch64-postlegalizer-lowering"

using namespace llvm;
using namespace MIPatternMatch;
using namespace AArch64GISelUtils;

namespace {

#define GET_GICOMBINER_TYPES
#include "AArch64GenPostLegalizeGILowering.inc"
#undef GET_GICOMBINER_TYPES

/// Represents a pseudo instruction which replaces a G_SHUFFLE_VECTOR.
///
/// Used for matching target-supported shuffles before codegen.
struct ShuffleVectorPseudo {};

/// Check if a G_EXT instruction can handle a shuffle mask \p M when the vector
/// sources of the shuffle are different.
std::optional<std::pair<bool, uint64_t>> getExtMask(ArrayRef<int> M,
                                                    unsigned NumElts) {}

/// Helper function for matchINS.
///
/// \returns a value when \p M is an ins mask for \p NumInputElements.
///
/// First element of the returned pair is true when the produced
/// G_INSERT_VECTOR_ELT destination should be the LHS of the G_SHUFFLE_VECTOR.
///
/// Second element is the destination lane for the G_INSERT_VECTOR_ELT.
std::optional<std::pair<bool, int>> isINSMask(ArrayRef<int> M,
                                              int NumInputElements) {}

/// \return true if a G_SHUFFLE_VECTOR instruction \p MI can be replaced with a
/// G_REV instruction. Returns the appropriate G_REV opcode in \p Opc.
bool matchREV(MachineInstr &MI, MachineRegisterInfo &MRI,
              ShuffleVectorPseudo &MatchInfo) {}

/// \return true if a G_SHUFFLE_VECTOR instruction \p MI can be replaced with
/// a G_TRN1 or G_TRN2 instruction.
bool matchTRN(MachineInstr &MI, MachineRegisterInfo &MRI,
              ShuffleVectorPseudo &MatchInfo) {}

/// \return true if a G_SHUFFLE_VECTOR instruction \p MI can be replaced with
/// a G_UZP1 or G_UZP2 instruction.
///
/// \param [in] MI - The shuffle vector instruction.
/// \param [out] MatchInfo - Either G_UZP1 or G_UZP2 on success.
bool matchUZP(MachineInstr &MI, MachineRegisterInfo &MRI,
              ShuffleVectorPseudo &MatchInfo) {}

/// \return true if a G_SHUFFLE_VECTOR instruction \p MI can be replaced with
/// a G_ZIP1 or G_ZIP2 instruction (see the file header: shuffles are matched
/// into target zip/uzp/etc. pseudos here).
///
/// \param [in] MI - The shuffle vector instruction.
/// \param [out] MatchInfo - The replacement pseudo on success.
bool matchZip(MachineInstr &MI, MachineRegisterInfo &MRI,
              ShuffleVectorPseudo &MatchInfo) {}

/// Helper function for matchDup.
bool matchDupFromInsertVectorElt(int Lane, MachineInstr &MI,
                                 MachineRegisterInfo &MRI,
                                 ShuffleVectorPseudo &MatchInfo) {}

/// Helper function for matchDup.
bool matchDupFromBuildVector(int Lane, MachineInstr &MI,
                             MachineRegisterInfo &MRI,
                             ShuffleVectorPseudo &MatchInfo) {}

/// \return true if a G_SHUFFLE_VECTOR instruction \p MI can be replaced with
/// a G_DUP, using the matchDupFrom* helpers above.
bool matchDup(MachineInstr &MI, MachineRegisterInfo &MRI,
              ShuffleVectorPseudo &MatchInfo) {}

// Check if an EXT instruction can handle the shuffle mask when the vector
// sources of the shuffle are the same.
bool isSingletonExtMask(ArrayRef<int> M, LLT Ty) {}

/// \return true if a G_SHUFFLE_VECTOR instruction \p MI can be replaced with
/// a G_EXT instruction. getExtMask / isSingletonExtMask above validate the
/// shuffle mask for the two-source and single-source cases respectively.
bool matchEXT(MachineInstr &MI, MachineRegisterInfo &MRI,
              ShuffleVectorPseudo &MatchInfo) {}

/// Replace a G_SHUFFLE_VECTOR instruction with a pseudo.
/// \p Opc is the opcode to use. \p MI is the G_SHUFFLE_VECTOR.
void applyShuffleVectorPseudo(MachineInstr &MI,
                              ShuffleVectorPseudo &MatchInfo) {}

/// Replace a G_SHUFFLE_VECTOR instruction with G_EXT.
/// Special-cased because the constant operand must be emitted as a G_CONSTANT
/// for the imported tablegen patterns to work.
void applyEXT(MachineInstr &MI, ShuffleVectorPseudo &MatchInfo) {}

/// Match a vector-element insert whose lane index is not a compile-time
/// constant. NOTE(review): inferred from the name — confirm the exact opcode
/// matched once the body is restored.
bool matchNonConstInsert(MachineInstr &MI, MachineRegisterInfo &MRI) {}

/// Lower the instruction matched by matchNonConstInsert.
void applyNonConstInsert(MachineInstr &MI, MachineRegisterInfo &MRI,
                         MachineIRBuilder &Builder) {}

/// Match a G_SHUFFLE_VECTOR with a mask which corresponds to a
/// G_INSERT_VECTOR_ELT and G_EXTRACT_VECTOR_ELT pair.
///
/// e.g.
///   %shuf = G_SHUFFLE_VECTOR %left, %right, shufflemask(0, 0)
///
/// Can be represented as
///
///   %extract = G_EXTRACT_VECTOR_ELT %left, 0
///   %ins = G_INSERT_VECTOR_ELT %left, %extract, 1
///
bool matchINS(MachineInstr &MI, MachineRegisterInfo &MRI,
              std::tuple<Register, int, Register, int> &MatchInfo) {}

/// Build the G_EXTRACT_VECTOR_ELT / G_INSERT_VECTOR_ELT pair described by
/// matchINS. \p MatchInfo carries two (register, lane) pairs, matching the
/// tuple produced by matchINS.
void applyINS(MachineInstr &MI, MachineRegisterInfo &MRI,
              MachineIRBuilder &Builder,
              std::tuple<Register, int, Register, int> &MatchInfo) {}

/// isVShiftRImm - Check if this is a valid vector for the immediate
/// operand of a vector shift right operation. The value must be in the range:
///   1 <= Value <= ElementBits for a right shift.
bool isVShiftRImm(Register Reg, MachineRegisterInfo &MRI, LLT Ty,
                  int64_t &Cnt) {}

/// Match a vector G_ASHR or G_LSHR with a valid immediate shift.
bool matchVAshrLshrImm(MachineInstr &MI, MachineRegisterInfo &MRI,
                       int64_t &Imm) {}

/// Rewrite the vector shift matched by matchVAshrLshrImm using the immediate
/// shift amount \p Imm.
void applyVAshrLshrImm(MachineInstr &MI, MachineRegisterInfo &MRI,
                       int64_t &Imm) {}

/// Determine if it is possible to modify the \p RHS and predicate \p P of a
/// G_ICMP instruction such that the right-hand side is an arithmetic immediate.
///
/// \returns A pair containing the updated immediate and predicate which may
/// be used to optimize the instruction.
///
/// \note This assumes that the comparison has been legalized.
std::optional<std::pair<uint64_t, CmpInst::Predicate>>
tryAdjustICmpImmAndPred(Register RHS, CmpInst::Predicate P,
                        const MachineRegisterInfo &MRI) {}

/// Determine whether or not it is possible to update the RHS and predicate of
/// a G_ICMP instruction such that the RHS will be selected as an arithmetic
/// immediate.
///
/// \p MI - The G_ICMP instruction
/// \p MatchInfo - The new RHS immediate and predicate on success
///
/// See tryAdjustICmpImmAndPred for valid transformations.
bool matchAdjustICmpImmAndPred(
    MachineInstr &MI, const MachineRegisterInfo &MRI,
    std::pair<uint64_t, CmpInst::Predicate> &MatchInfo) {}

/// Apply the new RHS immediate and predicate computed by
/// matchAdjustICmpImmAndPred to the G_ICMP \p MI.
void applyAdjustICmpImmAndPred(
    MachineInstr &MI, std::pair<uint64_t, CmpInst::Predicate> &MatchInfo,
    MachineIRBuilder &MIB, GISelChangeObserver &Observer) {}

/// Match an instruction that can be rewritten as a lane duplicate.
/// \p MatchInfo is an (opcode, lane) pair on success — inferred from the pair
/// type; NOTE(review): confirm the exact opcodes once the body is restored.
bool matchDupLane(MachineInstr &MI, MachineRegisterInfo &MRI,
                  std::pair<unsigned, int> &MatchInfo) {}

/// Rewrite \p MI using the (opcode, lane) pair produced by matchDupLane.
void applyDupLane(MachineInstr &MI, MachineRegisterInfo &MRI,
                  MachineIRBuilder &B, std::pair<unsigned, int> &MatchInfo) {}

/// Match a vector unmerge that should be scalarized. NOTE(review): inferred
/// from the name — confirm which G_UNMERGE_VALUES shapes qualify.
bool matchScalarizeVectorUnmerge(MachineInstr &MI, MachineRegisterInfo &MRI) {}

/// Rewrite the unmerge matched by matchScalarizeVectorUnmerge.
void applyScalarizeVectorUnmerge(MachineInstr &MI, MachineRegisterInfo &MRI,
                                 MachineIRBuilder &B) {}

/// Match a G_BUILD_VECTOR that can be replaced by a G_DUP. NOTE(review):
/// inferred from the name — presumably all source operands must be identical.
bool matchBuildVectorToDup(MachineInstr &MI, MachineRegisterInfo &MRI) {}

/// Rewrite the build vector matched by matchBuildVectorToDup as a G_DUP.
void applyBuildVectorToDup(MachineInstr &MI, MachineRegisterInfo &MRI,
                           MachineIRBuilder &B) {}

/// \returns how many instructions would be saved by folding a G_ICMP's shift
/// and/or extension operations.
unsigned getCmpOperandFoldingProfit(Register CmpOp, MachineRegisterInfo &MRI) {}

/// \returns true if it would be profitable to swap the LHS and RHS of a G_ICMP
/// instruction \p MI.
bool trySwapICmpOperands(MachineInstr &MI, MachineRegisterInfo &MRI) {}

/// Swap the operands (and predicate) of the G_ICMP matched by
/// trySwapICmpOperands.
void applySwapICmpOperands(MachineInstr &MI, GISelChangeObserver &Observer) {}

/// \returns a function which builds a vector floating point compare instruction
/// for a condition code \p CC.
/// \param [in] IsZero - True if the comparison is against 0.
/// \param [in] NoNans - True if the target has NoNansFPMath.
std::function<Register(MachineIRBuilder &)>
getVectorFCMP(AArch64CC::CondCode CC, Register LHS, Register RHS, bool IsZero,
              bool NoNans, MachineRegisterInfo &MRI) {}

/// Try to lower a vector G_FCMP \p MI into an AArch64-specific pseudo.
bool matchLowerVectorFCMP(MachineInstr &MI, MachineRegisterInfo &MRI,
                          MachineIRBuilder &MIB) {}

/// Try to lower a vector G_FCMP \p MI into an AArch64-specific pseudo.
void applyLowerVectorFCMP(MachineInstr &MI, MachineRegisterInfo &MRI,
                          MachineIRBuilder &MIB) {}

// Matches G_BUILD_VECTOR where at least one source operand is not a constant
bool matchLowerBuildToInsertVecElt(MachineInstr &MI, MachineRegisterInfo &MRI) {}

/// Lower the G_BUILD_VECTOR matched above into a chain of
/// G_INSERT_VECTOR_ELT instructions.
void applyLowerBuildToInsertVecElt(MachineInstr &MI, MachineRegisterInfo &MRI,
                                   MachineIRBuilder &B) {}

/// Match a store whose value operand can be folded into a truncating store.
/// \p SrcReg receives the pre-truncate source register on success.
bool matchFormTruncstore(MachineInstr &MI, MachineRegisterInfo &MRI,
                         Register &SrcReg) {}

/// Rewrite the store matched by matchFormTruncstore to read directly from
/// \p SrcReg, forming a truncating store.
void applyFormTruncstore(MachineInstr &MI, MachineRegisterInfo &MRI,
                         MachineIRBuilder &B, GISelChangeObserver &Observer,
                         Register &SrcReg) {}

// Lower vector G_SEXT_INREG back to shifts for selection. We allowed them to
// form in the first place for combine opportunities, so any remaining ones
// at this stage need be lowered back.
bool matchVectorSextInReg(MachineInstr &MI, MachineRegisterInfo &MRI) {}

/// Perform the shift-based lowering described above for a matched vector
/// G_SEXT_INREG.
void applyVectorSextInReg(MachineInstr &MI, MachineRegisterInfo &MRI,
                          MachineIRBuilder &B, GISelChangeObserver &Observer) {}

/// Combine <N x t>, unused = unmerge(G_EXT <2*N x t> v, undef, N)
///           => unused, <N x t> = unmerge v
bool matchUnmergeExtToUnmerge(MachineInstr &MI, MachineRegisterInfo &MRI,
                              Register &MatchInfo) {}

/// Rebuild the unmerge matched by matchUnmergeExtToUnmerge so it reads
/// directly from \p SrcReg (the G_EXT source).
void applyUnmergeExtToUnmerge(MachineInstr &MI, MachineRegisterInfo &MRI,
                              MachineIRBuilder &B,
                              GISelChangeObserver &Observer, Register &SrcReg) {}

// Match mul({z/s}ext , {z/s}ext) => {u/s}mull OR
// Match v2s64 mul instructions, which will then be scalarised later on
// Doing these two matches in one function to ensure that the order of matching
// will always be the same.
// Try lowering MUL to MULL before trying to scalarize if needed.
bool matchExtMulToMULL(MachineInstr &MI, MachineRegisterInfo &MRI) {}

/// Rewrite the multiply matched by matchExtMulToMULL.
void applyExtMulToMULL(MachineInstr &MI, MachineRegisterInfo &MRI,
                       MachineIRBuilder &B, GISelChangeObserver &Observer) {}

/// Combiner implementation driving the rules generated into
/// AArch64GenPostLegalizeGILowering.inc (included below with
/// GET_GICOMBINER_IMPL).
class AArch64PostLegalizerLoweringImpl : public Combiner {};

#define GET_GICOMBINER_IMPL
#include "AArch64GenPostLegalizeGILowering.inc"
#undef GET_GICOMBINER_IMPL

/// Construct the combiner implementation, wiring the generated rule
/// configuration \p RuleConfig and subtarget \p STI into the base Combiner.
AArch64PostLegalizerLoweringImpl::AArch64PostLegalizerLoweringImpl(
    MachineFunction &MF, CombinerInfo &CInfo, const TargetPassConfig *TPC,
    GISelCSEInfo *CSEInfo,
    const AArch64PostLegalizerLoweringImplRuleConfig &RuleConfig,
    const AArch64Subtarget &STI)
    :{}

/// MachineFunctionPass wrapper that runs the post-legalizer lowering combiner
/// ("Lower AArch64 MachineInstrs after legalization" — see the pass
/// registration below).
class AArch64PostLegalizerLowering : public MachineFunctionPass {};
} // end anonymous namespace

/// Declare the analyses this pass requires and preserves.
void AArch64PostLegalizerLowering::getAnalysisUsage(AnalysisUsage &AU) const {}

/// Default-construct the pass wrapper.
AArch64PostLegalizerLowering::AArch64PostLegalizerLowering()
    :{}

/// Run the post-legalizer lowering combiner over \p MF.
/// \returns true if the function was modified.
bool AArch64PostLegalizerLowering::runOnMachineFunction(MachineFunction &MF) {}

// Pass identification. LLVM keys passes on the *address* of this static, so
// the value is irrelevant; the conventional initializer is 0.
char AArch64PostLegalizerLowering::ID = 0;
// Register the pass (and its TargetPassConfig dependency) with the LLVM pass
// infrastructure under the DEBUG_TYPE command-line name.
INITIALIZE_PASS_BEGIN(AArch64PostLegalizerLowering, DEBUG_TYPE,
                      "Lower AArch64 MachineInstrs after legalization", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(AArch64PostLegalizerLowering, DEBUG_TYPE,
                    "Lower AArch64 MachineInstrs after legalization", false,
                    false)

namespace llvm {
/// Factory used by the AArch64 target to create this pass. The original body
/// was empty, which is undefined behaviour for a non-void function; the
/// conventional LLVM factory body returns a heap-allocated pass instance
/// (ownership is taken by the PassManager).
FunctionPass *createAArch64PostLegalizerLowering() {
  return new AArch64PostLegalizerLowering();
}
} // end namespace llvm