
//=== AArch64PostLegalizerCombiner.cpp --------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// Post-legalization combines on generic MachineInstrs.
///
/// The combines here must preserve instruction legality.
///
/// Lowering combines (e.g. pseudo matching) should be handled by
/// AArch64PostLegalizerLowering.
///
/// Combines which don't rely on instruction legality should go in the
/// AArch64PreLegalizerCombiner.
///
//===----------------------------------------------------------------------===//

#include "AArch64TargetMachine.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/GlobalISel/CSEInfo.h"
#include "llvm/CodeGen/GlobalISel/CSEMIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Combiner.h"
#include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
#include "llvm/CodeGen/GlobalISel/CombinerInfo.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/Support/Debug.h"

#define GET_GICOMBINER_DEPS
#include "AArch64GenPostLegalizeGICombiner.inc"
#undef GET_GICOMBINER_DEPS

#define DEBUG_TYPE "aarch64-postlegalizer-combiner"

using namespace llvm;
using namespace MIPatternMatch;

namespace {

#define GET_GICOMBINER_TYPES
#include "AArch64GenPostLegalizeGICombiner.inc"
#undef GET_GICOMBINER_TYPES

/// This combine tries to do what performExtractVectorEltCombine does in SDAG.
/// Rewrite for pairwise fadd pattern
///   (s32 (g_extract_vector_elt
///           (g_fadd (vXs32 Other)
///                  (g_vector_shuffle (vXs32 Other) undef <1,X,...> )) 0))
/// ->
///   (s32 (g_fadd (g_extract_vector_elt (vXs32 Other) 0)
///                (g_extract_vector_elt (vXs32 Other) 1)))
bool matchExtractVecEltPairwiseAdd(
    MachineInstr &MI, MachineRegisterInfo &MRI,
    std::tuple<unsigned, LLT, Register> &MatchInfo) {}

void applyExtractVecEltPairwiseAdd(
    MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &B,
    std::tuple<unsigned, LLT, Register> &MatchInfo) {}
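
// Illustrative sketch, not part of the in-tree implementation: assuming the
// match step above has recovered the source vector `Src` and the scalar
// element type `EltTy`, the rewrite described in the comment amounts to
// extracting lanes 0 and 1 and adding them directly.
[[maybe_unused]] static void sketchRewritePairwiseFAdd(MachineInstr &ExtractMI,
                                                       Register Src, LLT EltTy,
                                                       MachineIRBuilder &B) {
  B.setInstrAndDebugLoc(ExtractMI);
  auto Lane0 = B.buildExtractVectorElementConstant(EltTy, Src, 0);
  auto Lane1 = B.buildExtractVectorElementConstant(EltTy, Src, 1);
  // The original extract's destination now holds the scalar fadd result.
  B.buildFAdd(ExtractMI.getOperand(0).getReg(), Lane0, Lane1);
  ExtractMI.eraseFromParent();
}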

/// \returns true if \p R is known to be sign-extended.
bool isSignExtended(Register R, MachineRegisterInfo &MRI) {}

/// \returns true if \p R is known to be zero-extended.
bool isZeroExtended(Register R, MachineRegisterInfo &MRI) {}
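
// Illustrative sketch, not the in-tree logic: queries like the two helpers
// above are typically answered by walking to the defining instruction in the
// MachineRegisterInfo and inspecting its opcode.
[[maybe_unused]] static bool sketchIsZeroExtended(Register R,
                                                  MachineRegisterInfo &MRI) {
  const MachineInstr *Def = MRI.getVRegDef(R);
  return Def && Def->getOpcode() == TargetOpcode::G_ZEXT;
}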

/// Match a G_MUL by a constant that can be profitably rewritten as a cheaper
/// shift/add/sub sequence; the rewrite to perform is recorded in \p ApplyFn.
bool matchAArch64MulConstCombine(
    MachineInstr &MI, MachineRegisterInfo &MRI,
    std::function<void(MachineIRBuilder &B, Register DstReg)> &ApplyFn) {}

void applyAArch64MulConstCombine(
    MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &B,
    std::function<void(MachineIRBuilder &B, Register DstReg)> &ApplyFn) {}
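
// Illustrative sketch, not the in-tree logic: for a combine like the one
// above, the match step typically captures the rewrite itself in the
// std::function MatchInfo so the apply step only has to invoke it. Shown here
// for the single case C == (1 << N) + 1, i.e. X * C -> X + (X << N); the s64
// shift-amount type is an assumption about the post-legalization form.
[[maybe_unused]] static bool sketchMulPow2Plus1ToShiftAdd(
    MachineInstr &MI, MachineRegisterInfo &MRI,
    std::function<void(MachineIRBuilder &B, Register DstReg)> &ApplyFn) {
  Register LHS = MI.getOperand(1).getReg();
  LLT Ty = MRI.getType(MI.getOperand(0).getReg());
  auto Cst = getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
  if (!Cst)
    return false;
  APInt C = Cst->Value;
  if (!C.isNonNegative() || !(C - 1).isPowerOf2())
    return false;
  unsigned ShiftAmt = (C - 1).logBase2();
  ApplyFn = [=](MachineIRBuilder &B, Register DstReg) {
    auto Amt = B.buildConstant(LLT::scalar(64), ShiftAmt);
    auto Shl = B.buildShl(Ty, LHS, Amt);
    B.buildAdd(DstReg, LHS, Shl);
  };
  return true;
}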

/// Try to fold a G_MERGE_VALUES of 2 s32 sources, where the second source
/// is a zero, into a G_ZEXT of the first.
bool matchFoldMergeToZext(MachineInstr &MI, MachineRegisterInfo &MRI) {}

void applyFoldMergeToZext(MachineInstr &MI, MachineRegisterInfo &MRI,
                          MachineIRBuilder &B, GISelChangeObserver &Observer) {}
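
// Illustrative sketch, not the in-tree logic: once the match step has checked
// that the high source of the G_MERGE_VALUES is a constant zero, the fold is
// a one-instruction rewrite of the merge into a G_ZEXT of the low source.
[[maybe_unused]] static void sketchFoldMergeToZext(MachineInstr &MergeMI,
                                                   MachineIRBuilder &B) {
  // %dst(s64) = G_MERGE_VALUES %lo(s32), 0  ==>  %dst(s64) = G_ZEXT %lo(s32)
  B.setInstrAndDebugLoc(MergeMI);
  B.buildZExt(MergeMI.getOperand(0).getReg(), MergeMI.getOperand(1).getReg());
  MergeMI.eraseFromParent();
}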

/// \returns True if a G_ANYEXT instruction \p MI should be mutated to a G_ZEXT
/// instruction.
bool matchMutateAnyExtToZExt(MachineInstr &MI, MachineRegisterInfo &MRI) {}

void applyMutateAnyExtToZExt(MachineInstr &MI, MachineRegisterInfo &MRI,
                             MachineIRBuilder &B,
                             GISelChangeObserver &Observer) {}
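
// Illustrative sketch, not the in-tree logic: a mutation like this is usually
// done in place by swapping the opcode and notifying the change observer, so
// no new instruction needs to be built.
[[maybe_unused]] static void sketchMutateAnyExtToZExt(MachineInstr &MI,
                                                      MachineIRBuilder &B,
                                                      GISelChangeObserver &Observer) {
  Observer.changingInstr(MI);
  MI.setDesc(B.getTII().get(TargetOpcode::G_ZEXT));
  Observer.changedInstr(MI);
}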

/// Match a 128-bit store of zero and split it into two 64-bit stores, for
/// size/performance reasons.
bool matchSplitStoreZero128(MachineInstr &MI, MachineRegisterInfo &MRI) {}

void applySplitStoreZero128(MachineInstr &MI, MachineRegisterInfo &MRI,
                            MachineIRBuilder &B,
                            GISelChangeObserver &Observer) {}
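
// Illustrative sketch, not the in-tree logic: splitting the store amounts to
// materialising a 64-bit zero, storing it at the original address and at
// address + 8, and giving each half its own narrowed memory operand.
[[maybe_unused]] static void sketchSplitZeroStore(GStore &Store,
                                                  MachineRegisterInfo &MRI,
                                                  MachineIRBuilder &B) {
  MachineFunction &MF = B.getMF();
  B.setInstrAndDebugLoc(Store);
  Register Base = Store.getPointerReg();
  LLT S64 = LLT::scalar(64);
  auto Zero = B.buildConstant(S64, 0);
  auto *LoMMO = MF.getMachineMemOperand(&Store.getMMO(), /*Offset=*/0, S64);
  auto *HiMMO = MF.getMachineMemOperand(&Store.getMMO(), /*Offset=*/8, S64);
  B.buildStore(Zero, Base, *LoMMO);
  auto HiAddr = B.buildPtrAdd(MRI.getType(Base), Base, B.buildConstant(S64, 8));
  B.buildStore(Zero, HiAddr, *HiMMO);
  Store.eraseFromParent();
}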

/// Match a G_OR that implements a bitwise select, i.e. (X & Mask) | (Y & ~Mask),
/// so that it can be emitted as an AArch64 BSP (bitwise select) operation.
bool matchOrToBSP(MachineInstr &MI, MachineRegisterInfo &MRI,
                  std::tuple<Register, Register, Register> &MatchInfo) {}

void applyOrToBSP(MachineInstr &MI, MachineRegisterInfo &MRI,
                  MachineIRBuilder &B,
                  std::tuple<Register, Register, Register> &MatchInfo) {}

/// Combines Mul(And(Srl(X, 15), 0x10001), 0xffff) into CMLTz.
bool matchCombineMulCMLT(MachineInstr &MI, MachineRegisterInfo &MRI,
                         Register &SrcReg) {}

void applyCombineMulCMLT(MachineInstr &MI, MachineRegisterInfo &MRI,
                         MachineIRBuilder &B, Register &SrcReg) {}
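
// Illustrative sketch, not the in-tree matcher: MIPatternMatch lets the
// def-use chain from the comment above be matched declaratively. Shown here
// in a scalar flavour; the real combine operates on vectors, where the
// constants appear as splats rather than plain G_CONSTANTs.
[[maybe_unused]] static bool sketchMatchMulCMLTShape(Register Dst,
                                                     MachineRegisterInfo &MRI,
                                                     Register &SrcReg) {
  return mi_match(
      Dst, MRI,
      m_GMul(m_GAnd(m_GLShr(m_Reg(SrcReg), m_SpecificICst(15)),
                    m_SpecificICst(0x10001)),
             m_SpecificICst(0xffff)));
}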

class AArch64PostLegalizerCombinerImpl : public Combiner {};

#define GET_GICOMBINER_IMPL
#include "AArch64GenPostLegalizeGICombiner.inc"
#undef GET_GICOMBINER_IMPL

AArch64PostLegalizerCombinerImpl::AArch64PostLegalizerCombinerImpl(
    MachineFunction &MF, CombinerInfo &CInfo, const TargetPassConfig *TPC,
    GISelKnownBits &KB, GISelCSEInfo *CSEInfo,
    const AArch64PostLegalizerCombinerImplRuleConfig &RuleConfig,
    const AArch64Subtarget &STI, MachineDominatorTree *MDT,
    const LegalizerInfo *LI)
    :{}

class AArch64PostLegalizerCombiner : public MachineFunctionPass {};
} // end anonymous namespace

void AArch64PostLegalizerCombiner::getAnalysisUsage(AnalysisUsage &AU) const {}

AArch64PostLegalizerCombiner::AArch64PostLegalizerCombiner(bool IsOptNone)
    :{}

bool AArch64PostLegalizerCombiner::runOnMachineFunction(MachineFunction &MF) {}

bool AArch64PostLegalizerCombiner::tryOptimizeConsecStores(
    SmallVectorImpl<StoreInfo> &Stores, CSEMIRBuilder &MIB) {}

static cl::opt<bool>
    EnableConsecutiveMemOpOpt("aarch64-postlegalizer-consecutive-memops",
                              cl::init(true), cl::Hidden,
                              cl::desc("Enable consecutive memop optimization "
                                       "in AArch64PostLegalizerCombiner"));

bool AArch64PostLegalizerCombiner::optimizeConsecutiveMemOpAddressing(
    MachineFunction &MF, CSEMIRBuilder &MIB) {}

char AArch64PostLegalizerCombiner::ID = 0;
INITIALIZE_PASS_BEGIN(AArch64PostLegalizerCombiner, DEBUG_TYPE,
                      "Combine AArch64 MachineInstrs after legalization", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_DEPENDENCY(GISelKnownBitsAnalysis)
INITIALIZE_PASS_END(AArch64PostLegalizerCombiner, DEBUG_TYPE,
                    "Combine AArch64 MachineInstrs after legalization", false,
                    false)

namespace llvm {
FunctionPass *createAArch64PostLegalizerCombiner(bool IsOptNone) {
  return new AArch64PostLegalizerCombiner(IsOptNone);
}
} // end namespace llvm