llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp

//===- InstCombineAndOrXor.cpp --------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitAnd, visitOr, and visitXor functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/Analysis/CmpInstAnalysis.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

/// This is the complement of getICmpCode, which turns an opcode and two
/// operands into either a constant true or false, or a brand new ICmp
/// instruction. The sign is passed in to determine which kind of predicate to
/// use in the new icmp instruction.
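///
/// Illustrative example (bit encoding assumed from CmpInstAnalysis, where the
/// three predicate bits are {less, equal, greater}): Code = 6 (0b110, i.e.
/// less-or-equal) with Sign = true would yield: icmp sle LHS, RHS.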
static Value *getNewICmpValue(unsigned Code, bool Sign, Value *LHS, Value *RHS,
                              InstCombiner::BuilderTy &Builder) {}

/// This is the complement of getFCmpCode, which turns an opcode and two
/// operands into either an FCmp instruction or a true/false constant.
static Value *getFCmpValue(unsigned Code, Value *LHS, Value *RHS,
                           InstCombiner::BuilderTy &Builder) {}

/// Emit a computation of: (V >= Lo && V < Hi) if Inside is true, otherwise
/// (V < Lo || V >= Hi). This method expects that Lo < Hi. IsSigned indicates
/// whether to treat V, Lo, and Hi as signed or not.
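///
/// Illustrative (assumed) emission for Lo = 10, Hi = 20, unsigned, Inside,
/// using the usual offset trick instead of two compares:
///   %off = add i32 %V, -10
///   %r   = icmp ult i32 %off, 10   ; i.e. %V >= 10 && %V < 20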
Value *InstCombinerImpl::insertRangeTest(Value *V, const APInt &Lo,
                                         const APInt &Hi, bool isSigned,
                                         bool Inside) {}

/// Classify (icmp eq (A & B), C) and (icmp ne (A & B), C) as matching patterns
/// that can be simplified.
/// One of A and B is considered the mask. The other is the value. This is
/// described as the "AMask" or "BMask" part of the enum. If the enum contains
/// only "Mask", then both A and B can be considered masks. If A is the mask,
/// then it was proven that (A & C) == C. This is trivial if C == A or C == 0.
/// If both A and C are constants, this proof is also easy.
/// For the following explanations, we assume that A is the mask.
///
/// "AllOnes" declares that the comparison is true only if (A & B) == A or all
/// bits of A are set in B.
///   Example: (icmp eq (A & 3), 3) -> AMask_AllOnes
///
/// "AllZeros" declares that the comparison is true only if (A & B) == 0 or all
/// bits of A are cleared in B.
///   Example: (icmp eq (A & 3), 0) -> Mask_AllZeros
///
/// "Mixed" declares that (A & B) == C and C might or might not contain any
/// number of one bits and zero bits.
///   Example: (icmp eq (A & 3), 1) -> AMask_Mixed
///
/// "Not" means that in above descriptions "==" should be replaced by "!=".
///   Example: (icmp ne (A & 3), 3) -> AMask_NotAllOnes
///
/// If the mask A contains a single bit, then the following is equivalent:
///    (icmp eq (A & B), A) equals (icmp ne (A & B), 0)
///    (icmp ne (A & B), A) equals (icmp eq (A & B), 0)
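///
/// A plausible bit layout (illustrative; inferred from conjugateICmpMask
/// below, which requires each "NotXXX" flag to sit next to its "XXX"
/// counterpart): AMask_AllOnes = 1, AMask_NotAllOnes = 2, BMask_AllOnes = 4,
/// BMask_NotAllOnes = 8, Mask_AllZeros = 16, Mask_NotAllZeros = 32, and so on
/// for the Mixed variants.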
enum MaskedICmpType {};

/// Return the set of patterns (from MaskedICmpType) that (icmp SCC (A & B), C)
/// satisfies.
static unsigned getMaskedICmpType(Value *A, Value *B, Value *C,
                                  ICmpInst::Predicate Pred) {}

/// Convert an analysis of a masked ICmp into its equivalent if all boolean
/// operations had the opposite sense. Since each "NotXXX" flag (recording !=)
/// is adjacent to the corresponding normal flag (recording ==), this just
/// involves swapping those bits over.
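/// Illustrative example (flag names as in MaskedICmpType): conjugating
/// AMask_AllOnes | BMask_NotAllOnes yields AMask_NotAllOnes | BMask_AllOnes;
/// every "==" flag trades places with its "!=" neighbor.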
static unsigned conjugateICmpMask(unsigned Mask) {}

// Adapts the external decomposeBitTestICmp for local use.
static bool decomposeBitTestICmp(Value *LHS, Value *RHS,
                                 CmpInst::Predicate &Pred, Value *&X,
                                 Value *&Y, Value *&Z) {}

/// Handle (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E).
/// Return the pattern classes (from MaskedICmpType) for the left hand side and
/// the right hand side as a pair.
/// LHS and RHS are the left hand side and the right hand side ICmps and PredL
/// and PredR are their predicates, respectively.
static std::optional<std::pair<unsigned, unsigned>> getMaskedTypeForICmpPair(
    Value *&A, Value *&B, Value *&C, Value *&D, Value *&E, ICmpInst *LHS,
    ICmpInst *RHS, ICmpInst::Predicate &PredL, ICmpInst::Predicate &PredR) {}

/// Try to fold (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E) into a single
/// (icmp(A & X) ==/!= Y), where the left-hand side is of type Mask_NotAllZeros
/// and the right hand side is of type BMask_Mixed. For example,
/// (icmp (A & 12) != 0) & (icmp (A & 15) == 8) -> (icmp (A & 15) == 8).
/// Also used for logical and/or, must be poison safe.
static Value *foldLogOpOfMaskedICmps_NotAllZeros_BMask_Mixed(
    ICmpInst *LHS, ICmpInst *RHS, bool IsAnd, Value *A, Value *B, Value *D,
    Value *E, ICmpInst::Predicate PredL, ICmpInst::Predicate PredR,
    InstCombiner::BuilderTy &Builder) {}

/// Try to fold (icmp(A & B) ==/!= 0) &/| (icmp(A & D) ==/!= E) into a single
/// (icmp(A & X) ==/!= Y), where the left-hand side and the right hand side
/// aren't of the common mask pattern type.
/// Also used for logical and/or, must be poison safe.
static Value *foldLogOpOfMaskedICmpsAsymmetric(
    ICmpInst *LHS, ICmpInst *RHS, bool IsAnd, Value *A, Value *B, Value *C,
    Value *D, Value *E, ICmpInst::Predicate PredL, ICmpInst::Predicate PredR,
    unsigned LHSMask, unsigned RHSMask, InstCombiner::BuilderTy &Builder) {}

/// Try to fold (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E)
/// into a single (icmp(A & X) ==/!= Y).
static Value *foldLogOpOfMaskedICmps(ICmpInst *LHS, ICmpInst *RHS, bool IsAnd,
                                     bool IsLogical,
                                     InstCombiner::BuilderTy &Builder) {}

/// Try to fold a signed range check with lower bound 0 to an unsigned icmp.
/// Example: (icmp sge x, 0) & (icmp slt x, n) --> icmp ult x, n
/// If \p Inverted is true then the check is for the inverted range, e.g.
/// (icmp slt x, 0) | (icmp sgt x, n) --> icmp ugt x, n
Value *InstCombinerImpl::simplifyRangeCheck(ICmpInst *Cmp0, ICmpInst *Cmp1,
                                            bool Inverted) {}

// (or (icmp eq X, 0), (icmp eq X, Pow2OrZero))
//      -> (icmp eq (and X, Pow2OrZero), X)
// (and (icmp ne X, 0), (icmp ne X, Pow2OrZero))
//      -> (icmp ne (and X, Pow2OrZero), X)
static Value *
foldAndOrOfICmpsWithPow2AndWithZero(InstCombiner::BuilderTy &Builder,
                                    ICmpInst *LHS, ICmpInst *RHS, bool IsAnd,
                                    const SimplifyQuery &Q) {}

// Fold (iszero(A & K1) | iszero(A & K2)) -> (A & (K1 | K2)) != (K1 | K2)
// Fold (!iszero(A & K1) & !iszero(A & K2)) -> (A & (K1 | K2)) == (K1 | K2)
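// Concrete instance (illustrative): with K1 = 1 and K2 = 4,
//   (icmp eq (A & 1), 0) | (icmp eq (A & 4), 0) -> (icmp ne (A & 5), 5)
// "some tested bit is clear" becomes "not all tested bits are set".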
Value *InstCombinerImpl::foldAndOrOfICmpsOfAndWithPow2(ICmpInst *LHS,
                                                       ICmpInst *RHS,
                                                       Instruction *CxtI,
                                                       bool IsAnd,
                                                       bool IsLogical) {}

/// General pattern:
///   X & Y
///
/// Where Y is checking that all the high bits (covered by a mask 4294967168)
/// are uniform, i.e.  %arg & 4294967168  can be either  4294967168  or  0
/// Pattern can be one of:
///   %t = add        i32 %arg,    128
///   %r = icmp   ult i32 %t,      256
/// Or
///   %t0 = shl       i32 %arg,    24
///   %t1 = ashr      i32 %t0,     24
///   %r  = icmp  eq  i32 %t1,     %arg
/// Or
///   %t0 = trunc     i32 %arg  to i8
///   %t1 = sext      i8  %t0   to i32
///   %r  = icmp  eq  i32 %t1,     %arg
/// This pattern is a signed truncation check.
///
/// And X is checking that some bit in that same mask is zero.
/// I.e. can be one of:
///   %r = icmp sgt i32   %arg,    -1
/// Or
///   %t = and      i32   %arg,    2147483648
///   %r = icmp eq  i32   %t,      0
///
/// Since we are checking that all the bits in that mask are the same,
/// and a particular bit is zero, what we are really checking is that all the
/// masked bits are zero.
/// So this should be transformed to:
///   %r = icmp ult i32 %arg, 128
static Value *foldSignedTruncationCheck(ICmpInst *ICmp0, ICmpInst *ICmp1,
                                        Instruction &CxtI,
                                        InstCombiner::BuilderTy &Builder) {}

/// Fold (icmp eq ctpop(X), 1) | (icmp eq X, 0) into (icmp ult ctpop(X), 2)
/// and fold (icmp ne ctpop(X), 1) & (icmp ne X, 0) into (icmp ugt ctpop(X), 1).
/// Also used for logical and/or, must be poison safe.
static Value *foldIsPowerOf2OrZero(ICmpInst *Cmp0, ICmpInst *Cmp1, bool IsAnd,
                                   InstCombiner::BuilderTy &Builder) {}

/// Reduce a pair of compares that check if a value has exactly 1 bit set.
/// Also used for logical and/or, must be poison safe if range attributes are
/// dropped.
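///
/// Illustrative instance (assumed shape): for unsigned X,
///   (icmp ne X, 0) & (icmp eq (and X, (add X, -1)), 0)
///     -> icmp eq (ctpop X), 1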
static Value *foldIsPowerOf2(ICmpInst *Cmp0, ICmpInst *Cmp1, bool JoinedByAnd,
                             InstCombiner::BuilderTy &Builder,
                             InstCombinerImpl &IC) {}

/// Try to fold (icmp(A & B) == 0) & (icmp(A & D) != E) into (icmp A u< D) iff
/// B is a contiguous set of ones starting from the most significant bit
/// (negative power of 2), D and E are equal, and D is a contiguous set of ones
/// starting at the most significant zero bit in B. Parameter B supports masking
/// using undef/poison in either scalar or vector values.
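///
/// Illustrative i8 instance: B = -16 (0b11110000), D = E = 12 (0b00001100):
///   (icmp eq (A & -16), 0) & (icmp ne (A & 12), 12) -> icmp ult A, 12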
static Value *foldNegativePower2AndShiftedMask(
    Value *A, Value *B, Value *D, Value *E, ICmpInst::Predicate PredL,
    ICmpInst::Predicate PredR, InstCombiner::BuilderTy &Builder) {}

/// Try to fold ((icmp X u< P) & (icmp(X & M) != M)) or ((icmp X s> -1) &
/// (icmp(X & M) != M)) into (icmp X u< M). Where P is a power of 2, M < P, and
/// M is a contiguous shifted mask starting at the right most significant zero
/// bit in P. SGT is supported because, when P is the largest representable
/// power of 2, an earlier optimization converts the expression into
/// (icmp X s> -1).
/// Parameter P supports masking using undef/poison in either scalar or vector
/// values.
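///
/// Illustrative i8 instance: P = 16, M = 12:
///   (icmp ult X, 16) & (icmp ne (X & 12), 12) -> icmp ult X, 12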
static Value *foldPowerOf2AndShiftedMask(ICmpInst *Cmp0, ICmpInst *Cmp1,
                                         bool JoinedByAnd,
                                         InstCombiner::BuilderTy &Builder) {}

/// Commuted variants are assumed to be handled by calling this function again
/// with the parameters swapped.
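///
/// Illustrative instance (assumed shape): an underflow guard paired with a
/// zero test on the difference,
///   (icmp uge A, B) & (icmp ne (sub A, B), 0) -> icmp ugt A, B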
static Value *foldUnsignedUnderflowCheck(ICmpInst *ZeroICmp,
                                         ICmpInst *UnsignedICmp, bool IsAnd,
                                         const SimplifyQuery &Q,
                                         InstCombiner::BuilderTy &Builder) {}

struct IntPart {};

/// Match an extraction of bits from an integer.
static std::optional<IntPart> matchIntPart(Value *V) {}

/// Materialize an extraction of bits from an integer in IR.
static Value *extractIntPart(const IntPart &P, IRBuilderBase &Builder) {}

/// (icmp eq X0, Y0) & (icmp eq X1, Y1) -> icmp eq X01, Y01
/// (icmp ne X0, Y0) | (icmp ne X1, Y1) -> icmp ne X01, Y01
/// where X0, X1 and Y0, Y1 are adjacent parts extracted from an integer.
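///
/// Illustrative instance: merging byte 0 and byte 1 comparisons of i32 X, Y:
///   (icmp eq (trunc X to i8), (trunc Y to i8)) &
///   (icmp eq (trunc (lshr X, 8) to i8), (trunc (lshr Y, 8) to i8))
///     -> icmp eq (trunc X to i16), (trunc Y to i16)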
Value *InstCombinerImpl::foldEqOfParts(ICmpInst *Cmp0, ICmpInst *Cmp1,
                                       bool IsAnd) {}

/// Reduce logic-of-compares with equality to a constant by substituting a
/// common operand with the constant. Callers are expected to call this with
/// Cmp0/Cmp1 switched to handle logic op commutativity.
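/// Illustrative instance:
///   (icmp eq X, 42) & (icmp ult Y, X) -> (icmp eq X, 42) & (icmp ult Y, 42)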
static Value *foldAndOrOfICmpsWithConstEq(ICmpInst *Cmp0, ICmpInst *Cmp1,
                                          bool IsAnd, bool IsLogical,
                                          InstCombiner::BuilderTy &Builder,
                                          const SimplifyQuery &Q) {}

/// Fold (icmp Pred1 V1, C1) & (icmp Pred2 V2, C2)
/// or   (icmp Pred1 V1, C1) | (icmp Pred2 V2, C2)
/// into a single comparison using range-based reasoning.
/// NOTE: This is also used for logical and/or, must be poison-safe!
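/// Illustrative instance: the signed pair bounding [0, 10) collapses to one
/// unsigned test: (icmp sgt X, -1) & (icmp slt X, 10) -> icmp ult X, 10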
Value *InstCombinerImpl::foldAndOrOfICmpsUsingRanges(ICmpInst *ICmp1,
                                                     ICmpInst *ICmp2,
                                                     bool IsAnd) {}

/// Ignore all operations which only change the sign of a value, returning the
/// underlying magnitude value.
static Value *stripSignOnlyFPOps(Value *Val) {}

/// Matches the canonical form of a not-NaN test: fcmp ord x, 0
static bool matchIsNotNaN(FCmpInst::Predicate P, Value *LHS, Value *RHS) {}

/// Matches fcmp u__ x, +/-inf
static bool matchUnorderedInfCompare(FCmpInst::Predicate P, Value *LHS,
                                     Value *RHS) {}

/// and (fcmp ord x, 0), (fcmp u* x, inf) -> fcmp o* x, inf
///
/// Clang emits this pattern for doing an isfinite check in __builtin_isnormal.
static Value *matchIsFiniteTest(InstCombiner::BuilderTy &Builder, FCmpInst *LHS,
                                FCmpInst *RHS) {}

Value *InstCombinerImpl::foldLogicOfFCmps(FCmpInst *LHS, FCmpInst *RHS,
                                          bool IsAnd, bool IsLogicalSelect) {}

/// Match an fcmp against a special value that performs a test expressible by
/// llvm.is.fpclass.
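///
/// Illustrative instance: "fcmp oeq x, +inf" is the class test
/// llvm.is.fpclass(x, fcPosInf).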
static bool matchIsFPClassLikeFCmp(Value *Op, Value *&ClassVal,
                                   uint64_t &ClassMask) {}

/// or (is_fpclass x, mask0), (is_fpclass x, mask1)
///     -> is_fpclass x, (mask0 | mask1)
/// and (is_fpclass x, mask0), (is_fpclass x, mask1)
///     -> is_fpclass x, (mask0 & mask1)
/// xor (is_fpclass x, mask0), (is_fpclass x, mask1)
///     -> is_fpclass x, (mask0 ^ mask1)
Instruction *InstCombinerImpl::foldLogicOfIsFPClass(BinaryOperator &BO,
                                                    Value *Op0, Value *Op1) {}

/// Look for the pattern that conditionally negates a value via math operations:
///   cond.splat = sext i1 cond
///   sub = add cond.splat, x
///   xor = xor sub, cond.splat
/// and rewrite it to do the same, but via logical operations:
///   value.neg = sub 0, value
///   result = select i1 cond, value.neg, value
Instruction *InstCombinerImpl::canonicalizeConditionalNegationViaMathToSelect(
    BinaryOperator &I) {}

/// This is a limited reassociation for a special case (see above) where we are
/// checking if two values are either both NAN (unordered) or not-NAN (ordered).
/// This could be handled more generally in '-reassociation', but it seems like
/// an unlikely pattern for a large number of logic ops and fcmps.
static Instruction *reassociateFCmps(BinaryOperator &BO,
                                     InstCombiner::BuilderTy &Builder) {}

/// Match variations of De Morgan's Laws:
/// (~A & ~B) == (~(A | B))
/// (~A | ~B) == (~(A & B))
static Instruction *matchDeMorgansLaws(BinaryOperator &I,
                                       InstCombiner &IC) {}

bool InstCombinerImpl::shouldOptimizeCast(CastInst *CI) {}

/// Fold {and,or,xor} (cast X), C.
static Instruction *foldLogicCastConstant(BinaryOperator &Logic, CastInst *Cast,
                                          InstCombinerImpl &IC) {}

/// Fold {and,or,xor} (cast X), Y.
Instruction *InstCombinerImpl::foldCastedBitwiseLogic(BinaryOperator &I) {}

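// Fold and-of-or/not patterns into xor. Illustrative identity (the exact
// matched forms live in the body):
//   (A | B) & ~(A & B) --> A ^ B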
static Instruction *foldAndToXor(BinaryOperator &I,
                                 InstCombiner::BuilderTy &Builder) {}

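// The or-side counterpart. Illustrative identity:
//   (A & B) | ~(A | B) --> ~(A ^ B)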
static Instruction *foldOrToXor(BinaryOperator &I,
                                InstCombiner::BuilderTy &Builder) {}

/// Return true if a constant shift amount is always less than the specified
/// bit-width. If not, the shift could create poison in the narrower type.
static bool canNarrowShiftAmt(Constant *C, unsigned BitWidth) {}

/// Try to use narrower ops (sink zext ops) for an 'and' with binop operand and
/// a common zext operand: and (binop (zext X), C), (zext X).
Instruction *InstCombinerImpl::narrowMaskedBinOp(BinaryOperator &And) {}

/// Try folding relatively complex patterns for both And and Or operations
/// with all And and Or swapped.
static Instruction *foldComplexAndOrPatterns(BinaryOperator &I,
                                             InstCombiner::BuilderTy &Builder) {}

/// Try to reassociate a pair of binops so that values with one use only are
/// part of the same instruction. This may enable folds that are limited with
/// multi-use restrictions and makes it more likely to match other patterns that
/// are looking for a common operand.
static Instruction *reassociateForUses(BinaryOperator &BO,
                                       InstCombinerImpl::BuilderTy &Builder) {}

// Match
// (X + C2) | C
// (X + C2) ^ C
// (X + C2) & C
// and convert to do the bitwise logic first:
// (X | C) + C2
// (X ^ C) + C2
// (X & C) + C2
// iff bits affected by logic op are lower than last bit affected by math op
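// Illustrative instance: the 'or' touches only bits 0..2 while the add only
// affects bit 5 and above, so the two commute:
//   (X + 32) | 7 --> (X | 7) + 32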
static Instruction *canonicalizeLogicFirst(BinaryOperator &I,
                                           InstCombiner::BuilderTy &Builder) {}

// binop(shift(ShiftedC1, ShAmt), shift(ShiftedC2, add(ShAmt, AddC))) ->
// shift(binop(ShiftedC1, shift(ShiftedC2, AddC)), ShAmt)
// where both shifts are the same and AddC is a valid shift amount.
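// Illustrative instance (shl, AddC = 4):
//   (1 shl %s) | (2 shl (%s + 4)) --> 33 shl %s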
Instruction *InstCombinerImpl::foldBinOpOfDisplacedShifts(BinaryOperator &I) {}

// Fold and/or/xor with two equal intrinsic IDs:
// bitwise(fshl (A, B, ShAmt), fshl(C, D, ShAmt))
// -> fshl(bitwise(A, C), bitwise(B, D), ShAmt)
// bitwise(fshr (A, B, ShAmt), fshr(C, D, ShAmt))
// -> fshr(bitwise(A, C), bitwise(B, D), ShAmt)
// bitwise(bswap(A), bswap(B)) -> bswap(bitwise(A, B))
// bitwise(bswap(A), C) -> bswap(bitwise(A, bswap(C)))
// bitwise(bitreverse(A), bitreverse(B)) -> bitreverse(bitwise(A, B))
// bitwise(bitreverse(A), C) -> bitreverse(bitwise(A, bitreverse(C)))
static Instruction *
foldBitwiseLogicWithIntrinsics(BinaryOperator &I,
                               InstCombiner::BuilderTy &Builder) {}

// Try to simplify V by replacing occurrences of Op with RepOp, but only look
// through bitwise operations. In particular, for X | Y we try to replace Y with
// 0 inside X and for X & Y we try to replace Y with -1 inside X.
// Return the simplified result of X if successful, and nullptr otherwise.
// If SimplifyOnly is true, no new instructions will be created.
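// Illustrative instance: in X | Y with X = (A & ~Y), replacing Y by 0 inside
// X simplifies it to A, so (A & ~B) | B --> A | B.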
static Value *simplifyAndOrWithOpReplaced(Value *V, Value *Op, Value *RepOp,
                                          bool SimplifyOnly,
                                          InstCombinerImpl &IC,
                                          unsigned Depth = 0) {}

// FIXME: We use commutative matchers (m_c_*) for some, but not all, matches
// here. We should standardize that construct where it is needed or choose some
// other way to ensure that commutated variants of patterns are not missed.
Instruction *InstCombinerImpl::visitAnd(BinaryOperator &I) {}

Instruction *InstCombinerImpl::matchBSwapOrBitReverse(Instruction &I,
                                                      bool MatchBSwaps,
                                                      bool MatchBitReversals) {}

std::optional<std::pair<Intrinsic::ID, SmallVector<Value *, 3>>>
InstCombinerImpl::convertOrOfShiftsToFunnelShift(Instruction &Or) {}

/// Match UB-safe variants of the funnel shift intrinsic.
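/// Illustrative instance: the UB-safe rotate idiom (i32),
///   or (shl X, (and C, 31)), (lshr X, (and (sub 0, C), 31))
///     --> fshl(X, X, C)
/// The unmasked form would be poison at C == 0 (shift by 32), which is why
/// only UB-safe variants are matched.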
static Instruction *matchFunnelShift(Instruction &Or, InstCombinerImpl &IC) {}

/// Attempt to combine or(zext(x), shl(zext(y), bw/2)) concat packing patterns.
static Instruction *matchOrConcat(Instruction &Or,
                                  InstCombiner::BuilderTy &Builder) {}

/// If all elements of two constant vectors are 0/-1 and inverses, return true.
static bool areInverseVectorBitmasks(Constant *C1, Constant *C2) {}

/// We have an expression of the form (A & C) | (B & D). If A is a scalar or
/// vector composed of all-zeros or all-ones values and is the bitwise 'not' of
/// B, it can be used as the condition operand of a select instruction.
/// We will detect (A & C) | ~(B | D) when the flag ABIsTheSame is enabled.
Value *InstCombinerImpl::getSelectCondition(Value *A, Value *B,
                                            bool ABIsTheSame) {}

/// We have an expression of the form (A & B) | (C & D). Try to simplify this
/// to "A' ? B : D", where A' is a boolean or vector of booleans.
/// When InvertFalseVal is set to true, we try to match the pattern
/// where we have peeked through a 'not' op and A and C are the same:
/// (A & B) | ~(A | D) --> (A & B) | (~A & ~D) --> A' ? B : ~D
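///
/// Illustrative instance: when A = sext(i1 %c) (so A is all-ones or
/// all-zeros and C = ~A):
///   (A & B) | (~A & D) --> select i1 %c, B, D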
Value *InstCombinerImpl::matchSelectFromAndOr(Value *A, Value *B, Value *C,
                                              Value *D, bool InvertFalseVal) {}

// (icmp eq X, C) | (icmp ult Other, (X - C)) -> (icmp ule Other, (X - (C + 1)))
// (icmp ne X, C) & (icmp uge Other, (X - C)) -> (icmp ugt Other, (X - (C + 1)))
static Value *foldAndOrOfICmpEqConstantAndICmp(ICmpInst *LHS, ICmpInst *RHS,
                                               bool IsAnd, bool IsLogical,
                                               IRBuilderBase &Builder) {}

/// Fold (icmp)&(icmp) or (icmp)|(icmp) if possible.
/// If IsLogical is true, then the and/or is in select form and the transform
/// must be poison-safe.
Value *InstCombinerImpl::foldAndOrOfICmps(ICmpInst *LHS, ICmpInst *RHS,
                                          Instruction &I, bool IsAnd,
                                          bool IsLogical) {}

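// Fold or-of-inverted-operand patterns. Illustrative identity (the exact
// matched set lives in the body):
//   (~A & B) | ~(A | B) --> ~A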
static Value *foldOrOfInversions(BinaryOperator &I,
                                 InstCombiner::BuilderTy &Builder) {}

// FIXME: We use commutative matchers (m_c_*) for some, but not all, matches
// here. We should standardize that construct where it is needed or choose some
// other way to ensure that commutated variants of patterns are not missed.
Instruction *InstCombinerImpl::visitOr(BinaryOperator &I) {}

/// A ^ B can be specified using other logic ops in a variety of patterns. We
/// can fold these early and efficiently by morphing an existing instruction.
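///
/// Illustrative instance:
///   (A & B) ^ (A | B) --> A ^ B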
static Instruction *foldXorToXor(BinaryOperator &I,
                                 InstCombiner::BuilderTy &Builder) {}

Value *InstCombinerImpl::foldXorOfICmps(ICmpInst *LHS, ICmpInst *RHS,
                                        BinaryOperator &I) {}

/// If we have a masked merge, in the canonical form of:
/// (assuming that A only has one use.)
///   |        A  |  |B|
///   ((x ^ y) & M) ^ y
///    |  D  |
/// * If M is inverted:
///      |  D  |
///     ((x ^ y) & ~M) ^ y
///   We can canonicalize by swapping the final xor operand
///   to eliminate the 'not' of the mask.
///     ((x ^ y) & M) ^ x
/// * If M is a constant, and D has one use, we transform to 'and' / 'or' ops
///   because that shortens the dependency chain and improves analysis:
///     (x & M) | (y & ~M)
static Instruction *visitMaskedMerge(BinaryOperator &I,
                                     InstCombiner::BuilderTy &Builder) {}

static Instruction *foldNotXor(BinaryOperator &I,
                               InstCombiner::BuilderTy &Builder) {}

/// Canonicalize a shifty way to code absolute value to the more common pattern
/// that uses negation and select.
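///
/// Illustrative instance (i32):
///   %s = ashr i32 %x, 31
///   %t = add i32 %x, %s
///   %r = xor i32 %t, %s
/// becomes
///   %n = sub i32 0, %x
///   %r = select i1 (icmp slt i32 %x, 0), i32 %n, i32 %x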
static Instruction *canonicalizeAbs(BinaryOperator &Xor,
                                    InstCombiner::BuilderTy &Builder) {}

static bool canFreelyInvert(InstCombiner &IC, Value *Op,
                            Instruction *IgnoredUser) {}

static Value *freelyInvert(InstCombinerImpl &IC, Value *Op,
                           Instruction *IgnoredUser) {}

// Transform
//   z = ~(x &/| y)
// into:
//   z = ((~x) |/& (~y))
// iff both x and y are free to invert and all uses of z can be freely updated.
bool InstCombinerImpl::sinkNotIntoLogicalOp(Instruction &I) {}

// Transform
//   z = (~x) &/| y
// into:
//   z = ~(x |/& (~y))
// iff y is free to invert and all uses of z can be freely updated.
bool InstCombinerImpl::sinkNotIntoOtherHandOfLogicalOp(Instruction &I) {}

Instruction *InstCombinerImpl::foldNot(BinaryOperator &I) {}

// FIXME: We use commutative matchers (m_c_*) for some, but not all, matches
// here. We should standardize that construct where it is needed or choose some
// other way to ensure that commutated variants of patterns are not missed.
Instruction *InstCombinerImpl::visitXor(BinaryOperator &I) {}