llvm/lib/Target/X86/X86InstCombineIntrinsic.cpp

//===-- X86InstCombineIntrinsic.cpp - X86 specific InstCombine pass -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// X86 target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#include "X86TargetTransformInfo.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include <optional>

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "x86tti"

/// Return a constant boolean vector that has true elements in all positions
/// where the input constant data vector has an element with the sign bit set.
static Constant *getNegativeIsTrueBoolVec(Constant *V, const DataLayout &DL) {}
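
// For example (a sketch of the intended fold; it is equivalent to
// constant-folding "icmp sgt zeroinitializer, V" after bitcasting V to an
// integer vector of the same lane width):
//
//   <4 x i8> <i8 -128, i8 7, i8 0, i8 -1>
//     -->
//   <4 x i1> <i1 true, i1 false, i1 false, i1 true>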

/// Convert the x86 XMM integer vector mask to a vector of bools based on
/// each element's most significant bit (the sign bit).
static Value *getBoolVecFromMask(Value *Mask, const DataLayout &DL) {}
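
// A minimal sketch of the recognition the helper above performs; the name
// getBoolVecFromMaskSketch is illustrative only, and the upstream body
// (elided above) may handle additional cases.
static Value *getBoolVecFromMaskSketch(Value *Mask, const DataLayout &DL) {
  // Case 1: the mask was produced by sign-extending a bool vector, so the
  // original <N x i1> value can be reused directly.
  Value *BoolVec;
  if (match(Mask, m_SExt(m_Value(BoolVec))) &&
      BoolVec->getType()->isIntOrIntVectorTy(1))
    return BoolVec;

  // Case 2: the mask is a constant; fold its sign bits to a constant bool
  // vector using the helper above.
  if (auto *ConstantMask = dyn_cast<Constant>(Mask))
    return getNegativeIsTrueBoolVec(ConstantMask, DL);

  return nullptr;
}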

// TODO: If the x86 backend knew how to convert a bool vector mask back to an
// XMM register mask efficiently, we could transform all x86 masked intrinsics
// to LLVM masked intrinsics and remove the x86 masked intrinsic defs.
static Instruction *simplifyX86MaskedLoad(IntrinsicInst &II, InstCombiner &IC) {}
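
// A minimal sketch of the load-side rewrite, assuming getBoolVecFromMask
// succeeds (the name sketchX86MaskedLoad is illustrative, and the upstream
// body above may handle further special cases): emit the generic
// @llvm.masked.load with byte alignment, since the x86 intrinsics impose no
// alignment requirement, and a zero pass-through because disabled lanes are
// defined to read as zero.
static Instruction *sketchX86MaskedLoad(IntrinsicInst &II, InstCombiner &IC) {
  Value *Ptr = II.getArgOperand(0);
  Value *Mask = II.getArgOperand(1);
  if (Value *BoolMask = getBoolVecFromMask(Mask, IC.getDataLayout())) {
    Value *ZeroPassThru = Constant::getNullValue(II.getType());
    CallInst *NewLoad = IC.Builder.CreateMaskedLoad(II.getType(), Ptr,
                                                    Align(1), BoolMask,
                                                    ZeroPassThru);
    return IC.replaceInstUsesWith(II, NewLoad);
  }
  return nullptr;
}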

// TODO: If the x86 backend knew how to convert a bool vector mask back to an
// XMM register mask efficiently, we could transform all x86 masked intrinsics
// to LLVM masked intrinsics and remove the x86 masked intrinsic defs.
static bool simplifyX86MaskedStore(IntrinsicInst &II, InstCombiner &IC) {}
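
// In IR terms the store-side rewrite mirrors the load case; a sketch for the
// AVX <4 x float> variant (other variants differ only in types):
//
//   call void @llvm.x86.avx.maskstore.ps(ptr %p, <4 x i32> %mask,
//                                        <4 x float> %v)
//     -->
//   %b = icmp slt <4 x i32> %mask, zeroinitializer
//   call void @llvm.masked.store.v4f32.p0(<4 x float> %v, ptr %p, i32 1,
//                                         <4 x i1> %b)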

static Value *simplifyX86immShift(const IntrinsicInst &II,
                                  InstCombiner::BuilderTy &Builder) {}

// Attempt to simplify AVX2 per-element shift intrinsics to a generic IR shift.
// Unlike the generic IR shifts, the intrinsics have defined behaviour for out
// of range shift amounts (logical - set to zero, arithmetic - splat sign bit).
static Value *simplifyX86varShift(const IntrinsicInst &II,
                                  InstCombiner::BuilderTy &Builder) {}
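
// For example, when every lane's shift amount is a known in-range constant,
// the intrinsic can become a plain IR shift (a sketch):
//
//   %r = call <4 x i32> @llvm.x86.avx2.psllv.d(<4 x i32> %x,
//                             <4 x i32> <i32 0, i32 1, i32 2, i32 3>)
//     -->
//   %r = shl <4 x i32> %x, <i32 0, i32 1, i32 2, i32 3>
//
// Lanes with out-of-range amounts must instead fold to zero (logical) or to
// a splat of the sign bit (arithmetic) to match the intrinsic's semantics.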

static Value *simplifyX86pack(IntrinsicInst &II,
                              InstCombiner::BuilderTy &Builder, bool IsSigned) {}

static Value *simplifyX86pmulh(IntrinsicInst &II,
                               InstCombiner::BuilderTy &Builder, bool IsSigned,
                               bool IsRounding) {}

static Value *simplifyX86pmadd(IntrinsicInst &II,
                               InstCombiner::BuilderTy &Builder,
                               bool IsPMADDWD) {}

static Value *simplifyX86movmsk(const IntrinsicInst &II,
                                InstCombiner::BuilderTy &Builder) {}

static Value *simplifyX86addcarry(const IntrinsicInst &II,
                                  InstCombiner::BuilderTy &Builder) {}

static Value *simplifyTernarylogic(const IntrinsicInst &II,
                                   InstCombiner::BuilderTy &Builder) {}

static Value *simplifyX86insertps(const IntrinsicInst &II,
                                  InstCombiner::BuilderTy &Builder) {}

/// Attempt to simplify SSE4A EXTRQ/EXTRQI instructions using constant folding
/// or conversion to a shuffle vector.
static Value *simplifyX86extrq(IntrinsicInst &II, Value *Op0,
                               ConstantInt *CILength, ConstantInt *CIIndex,
                               InstCombiner::BuilderTy &Builder) {}
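
// A byte-aligned example of the shuffle conversion (a sketch; length 8,
// index 16 extracts the third byte of the low qword, and the upper qword of
// an EXTRQ/EXTRQI result is undefined):
//
//   %r = call <2 x i64> @llvm.x86.sse4a.extrqi(<2 x i64> %x, i8 8, i8 16)
//     -->
//   %b = bitcast <2 x i64> %x to <16 x i8>
//   %s = shufflevector <16 x i8> %b, <16 x i8> zeroinitializer,
//          <16 x i32> <i32 2, i32 16, i32 16, i32 16, i32 16, i32 16,
//                      i32 16, i32 16, i32 undef, i32 undef, i32 undef,
//                      i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
//   %r = bitcast <16 x i8> %s to <2 x i64>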

/// Attempt to simplify SSE4A INSERTQ/INSERTQI instructions using constant
/// folding or conversion to a shuffle vector.
static Value *simplifyX86insertq(IntrinsicInst &II, Value *Op0, Value *Op1,
                                 APInt APLength, APInt APIndex,
                                 InstCombiner::BuilderTy &Builder) {}
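
// The INSERTQ/INSERTQI direction is analogous (a sketch; length 8, index 8
// replaces byte 1 of the low qword of %x with byte 0 of %y, upper qword
// undefined):
//
//   %r = call <2 x i64> @llvm.x86.sse4a.insertqi(<2 x i64> %x, <2 x i64> %y,
//                                                i8 8, i8 8)
//     -->
//   %bx = bitcast <2 x i64> %x to <16 x i8>
//   %by = bitcast <2 x i64> %y to <16 x i8>
//   %s  = shufflevector <16 x i8> %bx, <16 x i8> %by,
//           <16 x i32> <i32 0, i32 16, i32 2, i32 3, i32 4, i32 5, i32 6,
//                       i32 7, i32 undef, i32 undef, i32 undef, i32 undef,
//                       i32 undef, i32 undef, i32 undef, i32 undef>
//   %r  = bitcast <16 x i8> %s to <2 x i64>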

/// Attempt to convert pshufb* to shufflevector if the mask is constant.
static Value *simplifyX86pshufb(const IntrinsicInst &II,
                                InstCombiner::BuilderTy &Builder) {}
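
// For example, an all-zero constant mask broadcasts byte 0 (a sketch):
//
//   %r = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %x,
//                                                   <16 x i8> zeroinitializer)
//     -->
//   %r = shufflevector <16 x i8> %x, <16 x i8> poison,
//                      <16 x i32> zeroinitializer
//
// In general each mask byte selects a source byte via its low four bits
// (within its 128-bit lane), and a mask byte with the sign bit set zeroes
// the destination byte, which the shuffle models by selecting from a zero
// vector as the second operand.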

/// Attempt to convert vpermilvar* to shufflevector if the mask is constant.
static Value *simplifyX86vpermilvar(const IntrinsicInst &II,
                                    InstCombiner::BuilderTy &Builder) {}
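
// For example, a constant reversal mask (a sketch):
//
//   %r = call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %x,
//                             <4 x i32> <i32 3, i32 2, i32 1, i32 0>)
//     -->
//   %r = shufflevector <4 x float> %x, <4 x float> poison,
//                      <4 x i32> <i32 3, i32 2, i32 1, i32 0>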

/// Attempt to convert vpermd/vpermps to shufflevector if the mask is constant.
static Value *simplifyX86vpermv(const IntrinsicInst &II,
                                InstCombiner::BuilderTy &Builder) {}
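
// For example (a sketch; VPERMD/VPERMPS indices are taken modulo the element
// count, so only the low three bits matter here):
//
//   %r = call <8 x i32> @llvm.x86.avx2.permd(<8 x i32> %x,
//            <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1,
//                       i32 0>)
//     -->
//   %r = shufflevector <8 x i32> %x, <8 x i32> poison,
//            <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1,
//                       i32 0>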

/// Attempt to convert vpermi2/vpermt2 to shufflevector if the mask is constant.
static Value *simplifyX86vpermv3(const IntrinsicInst &II,
                                 InstCombiner::BuilderTy &Builder) {}
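
// With a constant index vector, the two-source permute likewise becomes a
// plain two-operand shufflevector (a sketch, leaving the intrinsic's exact
// operand order aside): each index selects from the concatenation of the two
// data operands, so a <4 x i32> index of <2, 5, 0, 7> maps to
//
//   %r = shufflevector <4 x i32> %a, <4 x i32> %b,
//                      <4 x i32> <i32 2, i32 5, i32 0, i32 7>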

// Simplify VPERMV/VPERMV3 mask - only demand the active index bits.
static bool simplifyX86VPERMMask(Instruction *II, bool IsBinary,
                                 InstCombiner &IC) {}
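
// Sketch of the demanded-bits reasoning: an N-element single-source VPERMV
// only reads the low log2(N) bits of each mask element, and the two-source
// VPERMV3 form reads one bit more (it indexes the concatenation of both
// sources), so any computation feeding only the bits above those can be
// stripped via SimplifyDemandedBits on the index operand.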

std::optional<Instruction *>
X86TTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {}

std::optional<Value *> X86TTIImpl::simplifyDemandedUseBitsIntrinsic(
    InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, KnownBits &Known,
    bool &KnownBitsComputed) const {}

std::optional<Value *> X86TTIImpl::simplifyDemandedVectorEltsIntrinsic(
    InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
    APInt &UndefElts2, APInt &UndefElts3,
    std::function<void(Instruction *, unsigned, APInt, APInt &)>
        simplifyAndSetOp) const {}