llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp

//===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for load, store and alloca.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

STATISTIC(NumDeadStore, "Number of dead stores eliminated");
STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");

static cl::opt<unsigned> MaxCopiedFromConstantUsers(
    "instcombine-max-copied-from-constant-users", cl::init(300),
    cl::desc("Maximum users to visit in copy from constant transform"),
    cl::Hidden);

namespace llvm {
cl::opt<bool> EnableInferAlignmentPass(
    "enable-infer-alignment-pass", cl::init(true), cl::Hidden, cl::ZeroOrMore,
    cl::desc("Enable the InferAlignment pass, disabling alignment inference in "
             "InstCombine"));
}

/// isOnlyCopiedFromConstantMemory - Recursively walk the uses of a (derived)
/// pointer to an alloca.  Ignore any reads of the pointer; return false if we
/// see any stores or other unknown uses.  If we see pointer arithmetic, keep
/// track of whether it moves the pointer (with IsOffset) but otherwise traverse
/// the uses.  If we see a memcpy/memmove that targets an unoffset pointer to
/// the alloca, and if the source pointer is a pointer to a constant memory
/// location, we can optimize this.
static bool
isOnlyCopiedFromConstantMemory(AAResults *AA, AllocaInst *V,
                               MemTransferInst *&TheCopy,
                               SmallVectorImpl<Instruction *> &ToDelete) {}
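
// For illustration (names are only examples), the pattern this walk is meant
// to accept, assuming @g is a constant global of sufficient size:
//
//   %buf = alloca [4 x i32], align 4
//   call void @llvm.memcpy.p0.p0.i64(ptr %buf, ptr @g, i64 16, i1 false)
//   %v = load i32, ptr %buf, align 4   ; only reads of %buf besides the copy
//
// Every use of %buf other than the memcpy is a read, so TheCopy is set to the
// memcpy and the caller may forward the reads of %buf to @g instead.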

/// isOnlyCopiedFromConstantMemory - Return the memcpy/memmove that copies a
/// constant memory location into the specified alloca if that copy is the only
/// modification of the alloca; otherwise return nullptr. If we can prove this,
/// we can replace any uses of the alloca with uses of the memory location
/// directly.
static MemTransferInst *
isOnlyCopiedFromConstantMemory(AAResults *AA,
                               AllocaInst *AI,
                               SmallVectorImpl<Instruction *> &ToDelete) {}

/// Returns true if V is dereferenceable for the size of the alloca.
static bool isDereferenceableForAllocaSize(const Value *V, const AllocaInst *AI,
                                           const DataLayout &DL) {}

static Instruction *simplifyAllocaArraySize(InstCombinerImpl &IC,
                                            AllocaInst &AI, DominatorTree &DT) {}

namespace {
// If I and V are pointers in different address spaces, it is not allowed to
// use replaceAllUsesWith since I and V have different types. A
// non-target-specific transformation should not use addrspacecast on V since
// the two address spaces may be disjoint depending on the target.
//
// This class chases down uses of the old pointer until reaching the load
// instructions, then replaces the old pointer in the load instructions with
// the new pointer. If during the chasing it sees a bitcast or GEP, it will
// create a new bitcast or GEP with the new pointer and use it in the load
// instruction.
class PointerReplacer {};
} // end anonymous namespace
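
// For illustration (names are only examples): if %alloca in address space 0 is
// being replaced by @g in address space 4, then for
//
//   %gep = getelementptr i8, ptr %alloca, i64 8
//   %v = load i32, ptr %gep, align 4
//
// the replacer does not introduce an addrspacecast; it recreates the GEP on
// the new pointer and rewrites the load to use it:
//
//   %gep.new = getelementptr i8, ptr addrspace(4) @g, i64 8
//   %v = load i32, ptr addrspace(4) %gep.new, align 4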

bool PointerReplacer::collectUsers() {}

bool PointerReplacer::collectUsersRecursive(Instruction &I) {}

Value *PointerReplacer::getReplacement(Value *V) {}

void PointerReplacer::replace(Instruction *I) {}

void PointerReplacer::replacePointer(Value *V) {}

Instruction *InstCombinerImpl::visitAllocaInst(AllocaInst &AI) {}

// Are we allowed to form an atomic load or store of this type?
static bool isSupportedAtomicType(Type *Ty) {}

/// Helper to combine a load to a new type.
///
/// This just does the work of combining a load to a new type. It handles
/// metadata, etc., and returns the new instruction. The \c NewTy should be the
/// loaded *value* type: the new load reads \c NewTy from the original pointer
/// operand, carrying over the applicable metadata.
///
/// Note that this will create all of the instructions with whatever insert
/// point the \c InstCombinerImpl currently is using.
LoadInst *InstCombinerImpl::combineLoadToNewType(LoadInst &LI, Type *NewTy,
                                                 const Twine &Suffix) {}
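
// For illustration (names and the suffix are only examples): given
//
//   %x = load i32, ptr %p, align 4
//
// a call with NewTy == float and Suffix == ".cast" produces a load of the new
// value type from the same pointer, carrying over the applicable metadata:
//
//   %x.cast = load float, ptr %p, align 4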

/// Combine a store to a new type.
///
/// Returns the newly created store instruction.
static StoreInst *combineStoreToNewValue(InstCombinerImpl &IC, StoreInst &SI,
                                         Value *V) {}

/// Combine loads to match the type of their uses' value after looking
/// through intervening bitcasts.
///
/// The core idea here is that if the result of a load is used in an operation,
/// we should load the type most conducive to that operation. For example, when
/// loading an integer and converting that immediately to a pointer, we should
/// instead directly load a pointer.
///
/// However, this routine must never change the width of a load or the number of
/// loads as that would introduce a semantic change. This combine is expected to
/// be a semantic no-op which just allows loads to more closely model the types
/// of their consuming operations.
///
/// Currently, we also refuse to change the precise type used for an atomic load
/// or a volatile load. This is debatable, and might be reasonable to change
/// later. However, it is risky in case some backend or other part of LLVM is
/// relying on the exact type loaded to select appropriate atomic operations.
static Instruction *combineLoadToOperationType(InstCombinerImpl &IC,
                                               LoadInst &Load) {}
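
// For illustration, the single-use bitcast case (note that the load width is
// unchanged):
//
//   %x = load i32, ptr %p, align 4
//   %f = bitcast i32 %x to float
//
// becomes
//
//   %f = load float, ptr %p, align 4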

static Instruction *unpackLoadToAggregate(InstCombinerImpl &IC, LoadInst &LI) {}

// If we can determine that all possible objects pointed to by the provided
// pointer value are not only dereferenceable, but also definitively less than
// or equal to the provided maximum size, then return true (constant global
// values and allocas are the kinds of objects whose size we can typically
// determine). Otherwise, return false.
//
// FIXME: This should probably live in ValueTracking (or similar).
static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
                                     const DataLayout &DL) {}
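
// For illustration: for the constant global used in the example below,
//
//   @f.a = private unnamed_addr constant [1 x i32] [i32 12], align 4
//
// the only possible pointed-to object has a known size of 4 bytes, so the
// query succeeds for any MaxSize >= 4, whereas a pointer received as a
// function argument has no known bound and the query returns false.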

// If we're indexing into an object of a known size, and the outer index is
// not a constant, but having any value but zero would lead to undefined
// behavior, replace it with zero.
//
// For example, if we have:
// @f.a = private unnamed_addr constant [1 x i32] [i32 12], align 4
// ...
// %arrayidx = getelementptr inbounds [1 x i32]* @f.a, i64 0, i64 %x
// ... = load i32* %arrayidx, align 4
// Then we know that we can replace %x in the GEP with i64 0.
//
// FIXME: We could fold any GEP index to zero that would cause UB if it were
// not zero. Currently, we only handle the first such index. Also, we could
// search through non-zero constant indices if we kept track of the offsets
// those indices implied.
static bool canReplaceGEPIdxWithZero(InstCombinerImpl &IC,
                                     GetElementPtrInst *GEPI, Instruction *MemI,
                                     unsigned &Idx) {}

// If we're indexing into an object with a variable index for the memory
// access, but the object has only one element, we can assume that the index
// will always be zero. If we replace the GEP, return it.
static Instruction *replaceGEPIdxWithZero(InstCombinerImpl &IC, Value *Ptr,
                                          Instruction &MemI) {}

static bool canSimplifyNullStoreOrGEP(StoreInst &SI) {}

static bool canSimplifyNullLoadOrGEP(LoadInst &LI, Value *Op) {}

Instruction *InstCombinerImpl::visitLoadInst(LoadInst &LI) {}

/// Look for extractelement/insertvalue sequence that acts like a bitcast.
///
/// \returns underlying value that was "cast", or nullptr otherwise.
///
/// For example, if we have:
///
///     %E0 = extractelement <2 x double> %U, i32 0
///     %V0 = insertvalue [2 x double] undef, double %E0, 0
///     %E1 = extractelement <2 x double> %U, i32 1
///     %V1 = insertvalue [2 x double] %V0, double %E1, 1
///
/// and the layout of a <2 x double> is isomorphic to a [2 x double],
/// then %V1 can be safely approximated by a conceptual "bitcast" of %U.
/// Note that %U may contain non-undef values where %V1 has undef.
static Value *likeBitCastFromVector(InstCombinerImpl &IC, Value *V) {}

/// Combine stores to match the type of value being stored.
///
/// The core idea here is that the memory does not have any intrinsic type and,
/// where we can, we should match the type of a store to the type of the value
/// being stored.
///
/// However, this routine must never change the width of a store or the number of
/// stores as that would introduce a semantic change. This combine is expected to
/// be a semantic no-op which just allows stores to more closely model the types
/// of their incoming values.
///
/// Currently, we also refuse to change the precise type used for an atomic or
/// volatile store. This is debatable, and might be reasonable to change later.
/// However, it is risky in case some backend or other part of LLVM is relying
/// on the exact type stored to select appropriate atomic operations.
///
/// \returns true if the store was successfully combined away. This indicates
/// the caller must erase the store instruction. We have to let the caller erase
/// the store instruction as otherwise there is no way to signal whether it was
/// combined or not: IC.EraseInstFromFunction returns a null pointer.
static bool combineStoreToValueType(InstCombinerImpl &IC, StoreInst &SI) {}
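
// For illustration, the bitcast case (note that the store width is unchanged):
//
//   %i = bitcast float %f to i32
//   store i32 %i, ptr %p, align 4
//
// becomes
//
//   store float %f, ptr %p, align 4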

static bool unpackStoreToAggregate(InstCombinerImpl &IC, StoreInst &SI) {}

/// equivalentAddressValues - Test if A and B will obviously have the same
/// value. This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
///
static bool equivalentAddressValues(Value *A, Value *B) {}

Instruction *InstCombinerImpl::visitStoreInst(StoreInst &SI) {}

/// Try to transform:
///   if () { *P = v1; } else { *P = v2; }
/// or:
///   *P = v1; if () { *P = v2; }
/// into a phi node with a store in the successor.
bool InstCombinerImpl::mergeStoreIntoSuccessor(StoreInst &SI) {}
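
// For illustration (names are only examples), the two-sided form:
//
//   then:
//     store i32 1, ptr %p
//     br label %merge
//   else:
//     store i32 2, ptr %p
//     br label %merge
//   merge:
//     ...
//
// becomes
//
//   merge:
//     %merged = phi i32 [ 1, %then ], [ 2, %else ]
//     store i32 %merged, ptr %p
//     ...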