//===- InstCombineCompares.cpp --------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file implements the visitICmp and visitFCmp functions. // //===----------------------------------------------------------------------===// #include "InstCombineInternal.h" #include "llvm/ADT/APSInt.h" #include "llvm/ADT/ScopeExit.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/CaptureTracking.h" #include "llvm/Analysis/CmpInstAnalysis.h" #include "llvm/Analysis/ConstantFolding.h" #include "llvm/Analysis/InstructionSimplify.h" #include "llvm/Analysis/Utils/Local.h" #include "llvm/Analysis/VectorUtils.h" #include "llvm/IR/ConstantRange.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/InstrTypes.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/PatternMatch.h" #include "llvm/Support/KnownBits.h" #include "llvm/Transforms/InstCombine/InstCombiner.h" #include <bitset> using namespace llvm; using namespace PatternMatch; #define DEBUG_TYPE … // How many times is a select replaced by one of its operands? STATISTIC(NumSel, "Number of select opts"); /// Compute Result = In1+In2, returning true if the result overflowed for this /// type. static bool addWithOverflow(APInt &Result, const APInt &In1, const APInt &In2, bool IsSigned = false) { … } /// Compute Result = In1-In2, returning true if the result overflowed for this /// type. static bool subWithOverflow(APInt &Result, const APInt &In1, const APInt &In2, bool IsSigned = false) { … } /// Given an icmp instruction, return true if any use of this comparison is a /// branch on sign bit comparison. 
static bool hasBranchUse(ICmpInst &I) { … } /// Returns true if the exploded icmp can be expressed as a signed comparison /// to zero and updates the predicate accordingly. /// The signedness of the comparison is preserved. /// TODO: Refactor with decomposeBitTestICmp()? static bool isSignTest(ICmpInst::Predicate &Pred, const APInt &C) { … } /// This is called when we see this pattern: /// cmp pred (load (gep GV, ...)), cmpcst /// where GV is a global variable with a constant initializer. Try to simplify /// this into some simple computation that does not need the load. For example /// we can optimize "icmp eq (load (gep "foo", 0, i)), 0" into "icmp eq i, 3". /// /// If AndCst is non-null, then the loaded value is masked with that constant /// before doing the comparison. This handles cases like "A[i]&4 == 0". Instruction *InstCombinerImpl::foldCmpLoadFromIndexedGlobal( LoadInst *LI, GetElementPtrInst *GEP, GlobalVariable *GV, CmpInst &ICI, ConstantInt *AndCst) { … } /// Returns true if we can rewrite Start as a GEP with pointer Base /// and some integer offset. The nodes that need to be re-written /// for this transformation will be added to Explored. static bool canRewriteGEPAsOffset(Value *Start, Value *Base, GEPNoWrapFlags &NW, const DataLayout &DL, SetVector<Value *> &Explored) { … } // Sets the appropriate insert point on Builder where we can add // a replacement Instruction for V (if that is possible). static void setInsertionPoint(IRBuilder<> &Builder, Value *V, bool Before = true) { … } /// Returns a re-written value of Start as an indexed GEP using Base as a /// pointer. static Value *rewriteGEPAsOffset(Value *Start, Value *Base, GEPNoWrapFlags NW, const DataLayout &DL, SetVector<Value *> &Explored, InstCombiner &IC) { … } /// Converts (CMP GEPLHS, RHS) if this change would make RHS a constant. /// We can look through PHIs, GEPs and casts in order to determine a common base /// between GEPLHS and RHS. 
static Instruction *transformToIndexedCompare(GEPOperator *GEPLHS, Value *RHS, ICmpInst::Predicate Cond, const DataLayout &DL, InstCombiner &IC) { … } /// Fold comparisons between a GEP instruction and something else. At this point /// we know that the GEP is on the LHS of the comparison. Instruction *InstCombinerImpl::foldGEPICmp(GEPOperator *GEPLHS, Value *RHS, ICmpInst::Predicate Cond, Instruction &I) { … } bool InstCombinerImpl::foldAllocaCmp(AllocaInst *Alloca) { … } /// Fold "icmp pred (X+C), X". Instruction *InstCombinerImpl::foldICmpAddOpConst(Value *X, const APInt &C, ICmpInst::Predicate Pred) { … } /// Handle "(icmp eq/ne (ashr/lshr AP2, A), AP1)" -> /// (icmp eq/ne A, Log2(AP2/AP1)) -> /// (icmp eq/ne A, Log2(AP2) - Log2(AP1)). Instruction *InstCombinerImpl::foldICmpShrConstConst(ICmpInst &I, Value *A, const APInt &AP1, const APInt &AP2) { … } /// Handle "(icmp eq/ne (shl AP2, A), AP1)" -> /// (icmp eq/ne A, TrailingZeros(AP1) - TrailingZeros(AP2)). Instruction *InstCombinerImpl::foldICmpShlConstConst(ICmpInst &I, Value *A, const APInt &AP1, const APInt &AP2) { … } /// The caller has matched a pattern of the form: /// I = icmp ugt (add (add A, B), CI2), CI1 /// If this is of the form: /// sum = a + b /// if (sum+128 >u 255) /// Then replace it with llvm.sadd.with.overflow.i8. /// static Instruction *processUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B, ConstantInt *CI2, ConstantInt *CI1, InstCombinerImpl &IC) { … } /// If we have: /// icmp eq/ne (urem/srem %x, %y), 0 /// iff %y is a power-of-two, we can replace this with a bit test: /// icmp eq/ne (and %x, (add %y, -1)), 0 Instruction *InstCombinerImpl::foldIRemByPowerOfTwoToBitTest(ICmpInst &I) { … } /// Fold equality-comparison between zero and any (maybe truncated) right-shift /// by one-less-than-bitwidth into a sign test on the original value. 
Instruction *InstCombinerImpl::foldSignBitTest(ICmpInst &I) { … } // Handle icmp pred X, 0 Instruction *InstCombinerImpl::foldICmpWithZero(ICmpInst &Cmp) { … } /// Fold icmp Pred X, C. /// TODO: This code structure does not make sense. The saturating add fold /// should be moved to some other helper and extended as noted below (it is also /// possible that code has been made unnecessary - do we canonicalize IR to /// overflow/saturating intrinsics or not?). Instruction *InstCombinerImpl::foldICmpWithConstant(ICmpInst &Cmp) { … } /// Canonicalize icmp instructions based on dominating conditions. Instruction *InstCombinerImpl::foldICmpWithDominatingICmp(ICmpInst &Cmp) { … } /// Fold icmp (trunc X), C. Instruction *InstCombinerImpl::foldICmpTruncConstant(ICmpInst &Cmp, TruncInst *Trunc, const APInt &C) { … } /// Fold icmp (trunc nuw/nsw X), (trunc nuw/nsw Y). /// Fold icmp (trunc nuw/nsw X), (zext/sext Y). Instruction * InstCombinerImpl::foldICmpTruncWithTruncOrExt(ICmpInst &Cmp, const SimplifyQuery &Q) { … } /// Fold icmp (xor X, Y), C. Instruction *InstCombinerImpl::foldICmpXorConstant(ICmpInst &Cmp, BinaryOperator *Xor, const APInt &C) { … } /// For power-of-2 C: /// ((X s>> ShiftC) ^ X) u< C --> (X + C) u< (C << 1) /// ((X s>> ShiftC) ^ X) u> (C - 1) --> (X + C) u> ((C << 1) - 1) Instruction *InstCombinerImpl::foldICmpXorShiftConst(ICmpInst &Cmp, BinaryOperator *Xor, const APInt &C) { … } /// Fold icmp (and (sh X, Y), C2), C1. Instruction *InstCombinerImpl::foldICmpAndShift(ICmpInst &Cmp, BinaryOperator *And, const APInt &C1, const APInt &C2) { … } /// Fold icmp (and X, C2), C1. Instruction *InstCombinerImpl::foldICmpAndConstConst(ICmpInst &Cmp, BinaryOperator *And, const APInt &C1) { … } /// Fold icmp (and X, Y), C. Instruction *InstCombinerImpl::foldICmpAndConstant(ICmpInst &Cmp, BinaryOperator *And, const APInt &C) { … } /// Fold icmp eq/ne (or (xor/sub (X1, X2), xor/sub (X3, X4))), 0. 
static Value *foldICmpOrXorSubChain(ICmpInst &Cmp, BinaryOperator *Or, InstCombiner::BuilderTy &Builder) { … } /// Fold icmp (or X, Y), C. Instruction *InstCombinerImpl::foldICmpOrConstant(ICmpInst &Cmp, BinaryOperator *Or, const APInt &C) { … } /// Fold icmp (mul X, Y), C. Instruction *InstCombinerImpl::foldICmpMulConstant(ICmpInst &Cmp, BinaryOperator *Mul, const APInt &C) { … } /// Fold icmp (shl nuw C2, Y), C. static Instruction *foldICmpShlLHSC(ICmpInst &Cmp, Instruction *Shl, const APInt &C) { … } /// Fold icmp (shl X, Y), C. Instruction *InstCombinerImpl::foldICmpShlConstant(ICmpInst &Cmp, BinaryOperator *Shl, const APInt &C) { … } /// Fold icmp ({al}shr X, Y), C. Instruction *InstCombinerImpl::foldICmpShrConstant(ICmpInst &Cmp, BinaryOperator *Shr, const APInt &C) { … } Instruction *InstCombinerImpl::foldICmpSRemConstant(ICmpInst &Cmp, BinaryOperator *SRem, const APInt &C) { … } /// Fold icmp (udiv X, Y), C. Instruction *InstCombinerImpl::foldICmpUDivConstant(ICmpInst &Cmp, BinaryOperator *UDiv, const APInt &C) { … } /// Fold icmp ({su}div X, Y), C. Instruction *InstCombinerImpl::foldICmpDivConstant(ICmpInst &Cmp, BinaryOperator *Div, const APInt &C) { … } /// Fold icmp (sub X, Y), C. Instruction *InstCombinerImpl::foldICmpSubConstant(ICmpInst &Cmp, BinaryOperator *Sub, const APInt &C) { … } static Value *createLogicFromTable(const std::bitset<4> &Table, Value *Op0, Value *Op1, IRBuilderBase &Builder, bool HasOneUse) { … } /// Fold icmp (add X, Y), C. 
Instruction *InstCombinerImpl::foldICmpAddConstant(ICmpInst &Cmp, BinaryOperator *Add, const APInt &C) { … } bool InstCombinerImpl::matchThreeWayIntCompare(SelectInst *SI, Value *&LHS, Value *&RHS, ConstantInt *&Less, ConstantInt *&Equal, ConstantInt *&Greater) { … } Instruction *InstCombinerImpl::foldICmpSelectConstant(ICmpInst &Cmp, SelectInst *Select, ConstantInt *C) { … } Instruction *InstCombinerImpl::foldICmpBitCast(ICmpInst &Cmp) { … } /// Try to fold integer comparisons with a constant operand: icmp Pred X, C /// where X is some kind of instruction. Instruction *InstCombinerImpl::foldICmpInstWithConstant(ICmpInst &Cmp) { … } /// Fold an icmp equality instruction with binary operator LHS and constant RHS: /// icmp eq/ne BO, C. Instruction *InstCombinerImpl::foldICmpBinOpEqualityWithConstant( ICmpInst &Cmp, BinaryOperator *BO, const APInt &C) { … } static Instruction *foldCtpopPow2Test(ICmpInst &I, IntrinsicInst *CtpopLhs, const APInt &CRhs, InstCombiner::BuilderTy &Builder, const SimplifyQuery &Q) { … } /// Fold an equality icmp with LLVM intrinsic and constant operand. Instruction *InstCombinerImpl::foldICmpEqIntrinsicWithConstant( ICmpInst &Cmp, IntrinsicInst *II, const APInt &C) { … } /// Fold an icmp with LLVM intrinsics static Instruction * foldICmpIntrinsicWithIntrinsic(ICmpInst &Cmp, InstCombiner::BuilderTy &Builder) { … } /// Try to fold integer comparisons with a constant operand: icmp Pred X, C /// where X is some kind of instruction and C is AllowPoison. /// TODO: Move more folds which allow poison to this function. Instruction * InstCombinerImpl::foldICmpInstWithConstantAllowPoison(ICmpInst &Cmp, const APInt &C) { … } /// Fold an icmp with BinaryOp and constant operand: icmp Pred BO, C. 
Instruction *InstCombinerImpl::foldICmpBinOpWithConstant(ICmpInst &Cmp, BinaryOperator *BO, const APInt &C) { … } static Instruction * foldICmpUSubSatOrUAddSatWithConstant(ICmpInst::Predicate Pred, SaturatingInst *II, const APInt &C, InstCombiner::BuilderTy &Builder) { … } static Instruction * foldICmpOfCmpIntrinsicWithConstant(ICmpInst::Predicate Pred, IntrinsicInst *I, const APInt &C, InstCombiner::BuilderTy &Builder) { … } /// Fold an icmp with LLVM intrinsic and constant operand: icmp Pred II, C. Instruction *InstCombinerImpl::foldICmpIntrinsicWithConstant(ICmpInst &Cmp, IntrinsicInst *II, const APInt &C) { … } /// Handle icmp with constant (but not simple integer constant) RHS. Instruction *InstCombinerImpl::foldICmpInstWithConstantNotInt(ICmpInst &I) { … } Instruction *InstCombinerImpl::foldSelectICmp(ICmpInst::Predicate Pred, SelectInst *SI, Value *RHS, const ICmpInst &I) { … } // Returns whether V is a Mask ((X + 1) & X == 0) or ~Mask (-Pow2OrZero) static bool isMaskOrZero(const Value *V, bool Not, const SimplifyQuery &Q, unsigned Depth = 0) { … } /// Some comparisons can be simplified. /// In this case, we are looking for comparisons that look like /// a check for a lossy truncation. /// Folds: /// icmp SrcPred (x & Mask), x to icmp DstPred x, Mask /// icmp SrcPred (x & ~Mask), ~Mask to icmp DstPred x, ~Mask /// icmp eq/ne (x & ~Mask), 0 to icmp DstPred x, Mask /// icmp eq/ne (~x | Mask), -1 to icmp DstPred x, Mask /// Where Mask is some pattern that produces all-ones in low bits: /// (-1 >> y) /// ((-1 << y) >> y) <- non-canonical, has extra uses /// ~(-1 << y) /// ((1 << y) + (-1)) <- non-canonical, has extra uses /// The Mask can be a constant, too. /// For some predicates, the operands are commutative. /// For others, x can only be on a specific side. static Value *foldICmpWithLowBitMaskedVal(ICmpInst::Predicate Pred, Value *Op0, Value *Op1, const SimplifyQuery &Q, InstCombiner &IC) { … } /// Some comparisons can be simplified. 
/// In this case, we are looking for comparisons that look like /// a check for a lossy signed truncation. /// Folds: (MaskedBits is a constant.) /// ((%x << MaskedBits) a>> MaskedBits) SrcPred %x /// Into: /// (add %x, (1 << (KeptBits-1))) DstPred (1 << KeptBits) /// Where KeptBits = bitwidth(%x) - MaskedBits static Value * foldICmpWithTruncSignExtendedVal(ICmpInst &I, InstCombiner::BuilderTy &Builder) { … } // Given pattern: // icmp eq/ne (and ((x shift Q), (y oppositeshift K))), 0 // we should move shifts to the same hand of 'and', i.e. rewrite as // icmp eq/ne (and (x shift (Q+K)), y), 0 iff (Q+K) u< bitwidth(x) // We are only interested in opposite logical shifts here. // One of the shifts can be truncated. // If we can, we want to end up creating 'lshr' shift. static Value * foldShiftIntoShiftInAnotherHandOfAndInICmp(ICmpInst &I, const SimplifyQuery SQ, InstCombiner::BuilderTy &Builder) { … } /// Fold /// (-1 u/ x) u< y /// ((x * y) ?/ x) != y /// to /// @llvm.?mul.with.overflow(x, y) plus extraction of overflow bit /// Note that the comparison is commutative, while inverted (u>=, ==) predicate /// will mean that we are looking for the opposite answer. Value *InstCombinerImpl::foldMultiplicationOverflowCheck(ICmpInst &I) { … } static Instruction *foldICmpXNegX(ICmpInst &I, InstCombiner::BuilderTy &Builder) { … } static Instruction *foldICmpAndXX(ICmpInst &I, const SimplifyQuery &Q, InstCombinerImpl &IC) { … } static Instruction *foldICmpOrXX(ICmpInst &I, const SimplifyQuery &Q, InstCombinerImpl &IC) { … } static Instruction *foldICmpXorXX(ICmpInst &I, const SimplifyQuery &Q, InstCombinerImpl &IC) { … } /// Try to fold icmp (binop), X or icmp X, (binop). /// TODO: A large part of this logic is duplicated in InstSimplify's /// simplifyICmpWithBinOp(). We should be able to share that and avoid the code /// duplication. Instruction *InstCombinerImpl::foldICmpBinOp(ICmpInst &I, const SimplifyQuery &SQ) { … } /// Fold icmp Pred min|max(X, Y), Z. 
Instruction *InstCombinerImpl::foldICmpWithMinMax(Instruction &I, MinMaxIntrinsic *MinMax, Value *Z, ICmpInst::Predicate Pred) { … } // Canonicalize checking for a power-of-2-or-zero value: static Instruction *foldICmpPow2Test(ICmpInst &I, InstCombiner::BuilderTy &Builder) { … } Instruction *InstCombinerImpl::foldICmpEquality(ICmpInst &I) { … } Instruction *InstCombinerImpl::foldICmpWithTrunc(ICmpInst &ICmp) { … } Instruction *InstCombinerImpl::foldICmpWithZextOrSext(ICmpInst &ICmp) { … } /// Handle icmp (cast x), (cast or constant). Instruction *InstCombinerImpl::foldICmpWithCastOp(ICmpInst &ICmp) { … } static bool isNeutralValue(Instruction::BinaryOps BinaryOp, Value *RHS, bool IsSigned) { … } OverflowResult InstCombinerImpl::computeOverflow(Instruction::BinaryOps BinaryOp, bool IsSigned, Value *LHS, Value *RHS, Instruction *CxtI) const { … } bool InstCombinerImpl::OptimizeOverflowCheck(Instruction::BinaryOps BinaryOp, bool IsSigned, Value *LHS, Value *RHS, Instruction &OrigI, Value *&Result, Constant *&Overflow) { … } /// Recognize and process idiom involving test for multiplication /// overflow. /// /// The caller has matched a pattern of the form: /// I = cmp u (mul(zext A, zext B), V /// The function checks if this is a test for overflow and if so replaces /// multiplication with call to 'mul.with.overflow' intrinsic. /// /// \param I Compare instruction. /// \param MulVal Result of 'mult' instruction. It is one of the arguments of /// the compare instruction. Must be of integer type. /// \param OtherVal The other argument of compare instruction. /// \returns Instruction which must replace the compare instruction, NULL if no /// replacement required. static Instruction *processUMulZExtIdiom(ICmpInst &I, Value *MulVal, const APInt *OtherVal, InstCombinerImpl &IC) { … } /// When performing a comparison against a constant, it is possible that not all /// the bits in the LHS are demanded. This helper method computes the mask that /// IS demanded. 
static APInt getDemandedBitsLHSMask(ICmpInst &I, unsigned BitWidth) { … } /// Check that one use is in the same block as the definition and all /// other uses are in blocks dominated by a given block. /// /// \param DI Definition /// \param UI Use /// \param DB Block that must dominate all uses of \p DI outside /// the parent block /// \return true when \p UI is the only use of \p DI in the parent block /// and all other uses of \p DI are in blocks dominated by \p DB. /// bool InstCombinerImpl::dominatesAllUses(const Instruction *DI, const Instruction *UI, const BasicBlock *DB) const { … } /// Return true when the instruction sequence within a block is select-cmp-br. static bool isChainSelectCmpBranch(const SelectInst *SI) { … } /// True when a select result is replaced by one of its operands /// in select-icmp sequence. This will eventually result in the elimination /// of the select. /// /// \param SI Select instruction /// \param Icmp Compare instruction /// \param SIOpd Operand that replaces the select /// /// Notes: /// - The replacement is global and requires dominator information /// - The caller is responsible for the actual replacement /// /// Example: /// /// entry: /// %4 = select i1 %3, %C* %0, %C* null /// %5 = icmp eq %C* %4, null /// br i1 %5, label %9, label %7 /// ... /// ; <label>:7 ; preds = %entry /// %8 = getelementptr inbounds %C* %4, i64 0, i32 0 /// ... /// /// can be transformed to /// /// %5 = icmp eq %C* %0, null /// %6 = select i1 %3, i1 %5, i1 true /// br i1 %6, label %9, label %7 /// ... /// ; <label>:7 ; preds = %entry /// %8 = getelementptr inbounds %C* %0, i64 0, i32 0 // replace by %0! /// /// Similar when the first operand of the select is a constant or/and /// the compare is for not equal rather than equal. /// /// NOTE: The function is only called when the select and compare constants /// are equal, the optimization can work only for EQ predicates. 
This is not a /// major restriction since a NE compare should be 'normalized' to an equal /// compare, which usually happens in the combiner and test case /// select-cmp-br.ll checks for it. bool InstCombinerImpl::replacedSelectWithOperand(SelectInst *SI, const ICmpInst *Icmp, const unsigned SIOpd) { … } /// Try to fold the comparison based on range information we can get by checking /// whether bits are known to be zero or one in the inputs. Instruction *InstCombinerImpl::foldICmpUsingKnownBits(ICmpInst &I) { … } /// If one operand of an icmp is effectively a bool (value range of {0,1}), /// then try to reduce patterns based on that limit. Instruction *InstCombinerImpl::foldICmpUsingBoolRange(ICmpInst &I) { … } std::optional<std::pair<CmpInst::Predicate, Constant *>> InstCombiner::getFlippedStrictnessPredicateAndConstant(CmpInst::Predicate Pred, Constant *C) { … } /// If we have an icmp le or icmp ge instruction with a constant operand, turn /// it into the appropriate icmp lt or icmp gt instruction. This transform /// allows them to be folded in visitICmpInst. static ICmpInst *canonicalizeCmpWithConstant(ICmpInst &I) { … } /// If we have a comparison with a non-canonical predicate, if we can update /// all the users, invert the predicate and adjust all the users. CmpInst *InstCombinerImpl::canonicalizeICmpPredicate(CmpInst &I) { … } /// Integer compare with boolean values can always be turned into bitwise ops. 
static Instruction *canonicalizeICmpBool(ICmpInst &I, InstCombiner::BuilderTy &Builder) { … } // Transform pattern like: // (1 << Y) u<= X or ~(-1 << Y) u< X or ((1 << Y)+(-1)) u< X // (1 << Y) u> X or ~(-1 << Y) u>= X or ((1 << Y)+(-1)) u>= X // Into: // (X l>> Y) != 0 // (X l>> Y) == 0 static Instruction *foldICmpWithHighBitMask(ICmpInst &Cmp, InstCombiner::BuilderTy &Builder) { … } static Instruction *foldVectorCmp(CmpInst &Cmp, InstCombiner::BuilderTy &Builder) { … } // extract(uadd.with.overflow(A, B), 0) ult A // -> extract(uadd.with.overflow(A, B), 1) static Instruction *foldICmpOfUAddOv(ICmpInst &I) { … } static Instruction *foldICmpInvariantGroup(ICmpInst &I) { … } /// This function folds patterns produced by lowering of reduce idioms, such as /// llvm.vector.reduce.and which are lowered into instruction chains. This code /// attempts to generate fewer number of scalar comparisons instead of vector /// comparisons when possible. static Instruction *foldReductionIdiom(ICmpInst &I, InstCombiner::BuilderTy &Builder, const DataLayout &DL) { … } // This helper will be called with icmp operands in both orders. Instruction *InstCombinerImpl::foldICmpCommutative(ICmpInst::Predicate Pred, Value *Op0, Value *Op1, ICmpInst &CxtI) { … } Instruction *InstCombinerImpl::visitICmpInst(ICmpInst &I) { … } /// Fold fcmp ([us]itofp x, cst) if possible. Instruction *InstCombinerImpl::foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI, Constant *RHSC) { … } /// Fold (C / X) < 0.0 --> X < 0.0 if possible. Swap predicate if necessary. static Instruction *foldFCmpReciprocalAndZero(FCmpInst &I, Instruction *LHSI, Constant *RHSC) { … } /// Optimize fabs(X) compared with zero. static Instruction *foldFabsWithFcmpZero(FCmpInst &I, InstCombinerImpl &IC) { … } /// Optimize sqrt(X) compared with zero. 
static Instruction *foldSqrtWithFcmpZero(FCmpInst &I, InstCombinerImpl &IC) { … } static Instruction *foldFCmpFNegCommonOp(FCmpInst &I) { … } static Instruction *foldFCmpFSubIntoFCmp(FCmpInst &I, Instruction *LHSI, Constant *RHSC, InstCombinerImpl &CI) { … } static Instruction *foldFCmpWithFloorAndCeil(FCmpInst &I, InstCombinerImpl &IC) { … } Instruction *InstCombinerImpl::visitFCmpInst(FCmpInst &I) { … }