llvm/include/llvm/Analysis/MemorySSA.h

//===- MemorySSA.h - Build Memory SSA ---------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file exposes an interface to building/using memory SSA to
/// walk memory instructions using a use/def graph.
///
/// The MemorySSA class builds an SSA form that links together memory access
/// instructions such as loads, stores, atomics, and calls. Additionally, it
/// does a trivial form of "heap versioning": every time the memory state
/// changes in the program, we generate a new heap version. It generates
/// MemoryDef/Uses/Phis that are overlaid on top of the existing instructions.
///
/// As a trivial example,
/// define i32 @main() #0 {
/// entry:
///   %call = call noalias i8* @_Znwm(i64 4) #2
///   %0 = bitcast i8* %call to i32*
///   %call1 = call noalias i8* @_Znwm(i64 4) #2
///   %1 = bitcast i8* %call1 to i32*
///   store i32 5, i32* %0, align 4
///   store i32 7, i32* %1, align 4
///   %2 = load i32* %0, align 4
///   %3 = load i32* %1, align 4
///   %add = add nsw i32 %2, %3
///   ret i32 %add
/// }
///
/// Will become
/// define i32 @main() #0 {
/// entry:
///   ; 1 = MemoryDef(0)
///   %call = call noalias i8* @_Znwm(i64 4) #3
///   %2 = bitcast i8* %call to i32*
///   ; 2 = MemoryDef(1)
///   %call1 = call noalias i8* @_Znwm(i64 4) #3
///   %4 = bitcast i8* %call1 to i32*
///   ; 3 = MemoryDef(2)
///   store i32 5, i32* %2, align 4
///   ; 4 = MemoryDef(3)
///   store i32 7, i32* %4, align 4
///   ; MemoryUse(3)
///   %7 = load i32* %2, align 4
///   ; MemoryUse(4)
///   %8 = load i32* %4, align 4
///   %add = add nsw i32 %7, %8
///   ret i32 %add
/// }
///
/// Given this form, all the stores that could ever affect the load at %8 can
/// be found by taking the MemoryUse associated with it and walking from use to
/// def until you hit the top of the function.
///
/// Each def also has a list of users associated with it, so you can walk from
/// both def to users, and users to defs. Note that we disambiguate MemoryUses,
/// but not the RHS of MemoryDefs. You can see this above at %7, which would
/// otherwise be a MemoryUse(4). Being disambiguated means that for a given
/// store, all the MemoryUses on its use lists are may-aliases of that store
/// (but the MemoryDefs on its use list may not be).
///
/// MemoryDefs are not disambiguated because doing so would require multiple
/// reaching definitions, which would require multiple phis, and multiple
/// MemoryAccesses per instruction.
///
/// In addition to the def/use graph described above, MemoryDefs also contain
/// an "optimized" definition use.  The "optimized" use points to some def
/// reachable through the memory def chain.  The optimized def *may* (but is
/// not required to) alias the original MemoryDef, but no def *closer* to the
/// source def may alias it.  As the name implies, the purpose of the optimized
/// use is to allow caching of clobber searches for memory defs.  The optimized
/// def may be nullptr, in which case clients must walk the defining access
/// chain.
///
/// When iterating the uses of a MemoryDef, both defining uses and optimized
/// uses will be encountered.  If only one type is needed, the client must
/// filter the use walk.
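///
/// For example, a client holding a load's MemoryUse can walk toward the entry
/// of the function through the defining-access links (a minimal sketch; `MSSA`
/// and `Load` are assumed to come from client code, not this header):
///   MemoryUseOrDef *Use = MSSA.getMemoryAccess(Load);
///   MemoryAccess *Def = Use->getDefiningAccess();
///   while (!MSSA.isLiveOnEntryDef(Def)) {
///     // ... inspect Def, a reaching MemoryDef or MemoryPhi ...
///     if (auto *D = dyn_cast<MemoryUseOrDef>(Def))
///       Def = D->getDefiningAccess();
///     else
///       break; // MemoryPhi: pick an incoming value or stop here.
///   }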
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_MEMORYSSA_H
#define LLVM_ANALYSIS_MEMORYSSA_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/IR/DerivedUser.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/Pass.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iterator>
#include <memory>
#include <utility>

namespace llvm {

template <class GraphType> struct GraphTraits;
class Function;
class Loop;
class LLVMContext;
class MemoryAccess;
class MemorySSAWalker;
class Module;
class raw_ostream;

namespace MSSAHelpers {

struct AllAccessTag {};
struct DefsOnlyTag {};

} // end namespace MSSAHelpers

enum : unsigned {
  // Used to signify what the default invalid ID is for MemoryAccess's getID().
  INVALID_MEMORYACCESS_ID = -1U
};

template <class T> class memoryaccess_def_iterator_base;
using memoryaccess_def_iterator = memoryaccess_def_iterator_base<MemoryAccess>;
using const_memoryaccess_def_iterator =
    memoryaccess_def_iterator_base<const MemoryAccess>;

// The base for all memory accesses. All memory accesses in a block are
// linked together using an intrusive list.
class MemoryAccess
    : public DerivedUser,
      public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>,
      public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>> {};

template <>
struct ilist_alloc_traits<MemoryAccess> {};

inline raw_ostream &operator<<(raw_ostream &OS, const MemoryAccess &MA) {}

/// Class that has the common methods + fields of memory uses/defs. It's
/// a little awkward to have, but there are many cases where we want either a
/// use or def, and there are many cases where uses are needed (defs aren't
/// acceptable), and vice-versa.
///
/// This class should never be instantiated directly; make a MemoryUse or
/// MemoryDef instead.
class MemoryUseOrDef : public MemoryAccess {};

/// Represents read-only accesses to memory
///
/// In particular, the set of Instructions that will be represented by
/// MemoryUse's is exactly the set of Instructions for which
/// AliasAnalysis::getModRefInfo returns "Ref".
class MemoryUse final : public MemoryUseOrDef {};

template <>
struct OperandTraits<MemoryUse> : public FixedNumOperandTraits<MemoryUse, 1> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUse, MemoryAccess)

/// Represents a read-write access to memory, whether it is a must-alias,
/// or a may-alias.
///
/// In particular, the set of Instructions that will be represented by
/// MemoryDef's is exactly the set of Instructions for which
/// AliasAnalysis::getModRefInfo returns "Mod" or "ModRef".
/// Note that, in order to provide def-def chains, all defs also have a use
/// associated with them. This use points to the nearest reaching
/// MemoryDef/MemoryPhi.
class MemoryDef final : public MemoryUseOrDef {};

template <>
struct OperandTraits<MemoryDef> : public FixedNumOperandTraits<MemoryDef, 2> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryDef, MemoryAccess)

template <>
struct OperandTraits<MemoryUseOrDef> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUseOrDef, MemoryAccess)
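
// Because every MemoryDef also carries a use of its nearest reaching
// definition, a client can hop along the def-def chain directly (a minimal
// sketch; `MSSA` and `Store` are assumed to come from client code):
//   auto *StoreDef = cast<MemoryDef>(MSSA.getMemoryAccess(Store));
//   MemoryAccess *Reaching = StoreDef->getDefiningAccess();
//   // `Reaching` is a MemoryDef, a MemoryPhi, or the liveOnEntry def.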

/// Represents phi nodes for memory accesses.
///
/// These have the same semantic as regular phi nodes, with the exception that
/// only one phi will ever exist in a given basic block.
/// Guaranteeing one phi per block means guaranteeing there is only ever one
/// valid reaching MemoryDef/MemoryPHI along each path to the phi node.
/// This is ensured by not allowing disambiguation of the RHS of a MemoryDef or
/// a MemoryPhi's operands.
/// That is, given
/// if (a) {
///   store %a
///   store %b
/// }
/// it *must* be transformed into
/// if (a) {
///    1 = MemoryDef(liveOnEntry)
///    store %a
///    2 = MemoryDef(1)
///    store %b
/// }
/// and *not*
/// if (a) {
///    1 = MemoryDef(liveOnEntry)
///    store %a
///    2 = MemoryDef(liveOnEntry)
///    store %b
/// }
/// even if the two stores do not conflict. Otherwise, both 1 and 2 reach the
/// end of the branch, and if there are not two phi nodes, one will be
/// disconnected completely from the SSA graph below that point.
/// Because MemoryUse's do not generate new definitions, they do not have this
/// issue.
class MemoryPhi final : public MemoryAccess {};
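
// A minimal sketch of inspecting a block's MemoryPhi (assumes `MSSA` and `BB`
// come from client code; MemoryPhi mirrors the PHINode incoming-value API):
//   if (MemoryPhi *Phi = MSSA.getMemoryAccess(&BB)) {
//     for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
//       BasicBlock *Pred = Phi->getIncomingBlock(I);
//       MemoryAccess *Incoming = Phi->getIncomingValue(I);
//       // One reaching definition per predecessor edge.
//     }
//   }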

inline unsigned MemoryAccess::getID() const {}

inline bool MemoryUseOrDef::isOptimized() const {}

inline MemoryAccess *MemoryUseOrDef::getOptimized() const {}

inline void MemoryUseOrDef::setOptimized(MemoryAccess *MA) {}

inline void MemoryUseOrDef::resetOptimized() {}
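
// A minimal sketch of consulting the cached "optimized" access before paying
// for a fresh walker query (assumes `MSSA` and `MUD` come from client code):
//   MemoryAccess *Clobber =
//       MUD->isOptimized() ? MUD->getOptimized()
//                          : MSSA.getWalker()->getClobberingMemoryAccess(MUD);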

template <> struct OperandTraits<MemoryPhi> : public HungoffOperandTraits {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryPhi, MemoryAccess)

/// Encapsulates MemorySSA, including all data associated with memory
/// accesses.
class MemorySSA {};
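
// A minimal sketch of the per-function query surface (assumes `MSSA`, `I`,
// and `BB` come from client code):
//   MemoryUseOrDef *MA = MSSA.getMemoryAccess(&I); // access for an instruction
//   MemoryPhi *Phi = MSSA.getMemoryAccess(&BB);    // block's MemoryPhi, or null
//   MemoryAccess *LOE = MSSA.getLiveOnEntryDef();  // the distinguished entry def
//   bool Doms = MA && Phi && MSSA.dominates(MA, Phi);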

/// Enables verification of MemorySSA.
///
/// The checks which this flag enables are expensive and are disabled by
/// default unless `EXPENSIVE_CHECKS` is defined. The flag `-verify-memoryssa`
/// can be used to selectively enable the verification without recompilation.
extern bool VerifyMemorySSA;

// Internal MemorySSA utils, for use by MemorySSA classes and walkers
class MemorySSAUtil {};

/// An analysis that produces \c MemorySSA for a function.
///
class MemorySSAAnalysis : public AnalysisInfoMixin<MemorySSAAnalysis> {};
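
// A minimal sketch of obtaining MemorySSA under the new pass manager
// (`MyPass` is a hypothetical pass, not part of this header):
//   PreservedAnalyses MyPass::run(Function &F, FunctionAnalysisManager &AM) {
//     MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
//     // ... query MSSA ...
//     return PreservedAnalyses::all();
//   }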

/// Printer pass for \c MemorySSA.
class MemorySSAPrinterPass : public PassInfoMixin<MemorySSAPrinterPass> {};

/// Printer pass for \c MemorySSA via the walker.
class MemorySSAWalkerPrinterPass
    : public PassInfoMixin<MemorySSAWalkerPrinterPass> {};

/// Verifier pass for \c MemorySSA.
struct MemorySSAVerifierPass : PassInfoMixin<MemorySSAVerifierPass> {};

/// Legacy analysis pass which computes \c MemorySSA.
class MemorySSAWrapperPass : public FunctionPass {};

/// This is the generic walker interface for walkers of MemorySSA.
/// Walkers are used to further disambiguate the def-use chains MemorySSA
/// gives you, or otherwise produce better info than MemorySSA gives you.
/// In particular, while the def-use chains provide basic information, and are
/// guaranteed to give, for example, the nearest may-aliasing MemoryDef for a
/// MemoryUse as AliasAnalysis considers it, a user may want better or other
/// information. In particular, they may want to use SCEV info to further
/// disambiguate memory accesses, or they may want the nearest dominating
/// may-aliasing MemoryDef for a call or a store. This API provides a
/// standardized interface for getting and using that info.
class MemorySSAWalker {};
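
// A minimal sketch of a walker query (assumes `MSSA` and `Load` come from
// client code):
//   MemorySSAWalker *Walker = MSSA.getWalker();
//   MemoryAccess *Clobber = Walker->getClobberingMemoryAccess(Load);
//   if (MSSA.isLiveOnEntryDef(Clobber)) {
//     // No MemoryDef inside this function clobbers the load's location.
//   }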

/// A MemorySSAWalker that does no alias queries, or anything else. It
/// simply returns the links as they were constructed by the builder.
class DoNothingMemorySSAWalker final : public MemorySSAWalker {};

using MemoryAccessPair = std::pair<MemoryAccess *, MemoryLocation>;
using ConstMemoryAccessPair = std::pair<const MemoryAccess *, MemoryLocation>;

/// Iterator base class used to implement const and non-const iterators
/// over the defining accesses of a MemoryAccess.
template <class T>
class memoryaccess_def_iterator_base
    : public iterator_facade_base<memoryaccess_def_iterator_base<T>,
                                  std::forward_iterator_tag, T, ptrdiff_t, T *,
                                  T *> {};

inline memoryaccess_def_iterator MemoryAccess::defs_begin() {}

inline const_memoryaccess_def_iterator MemoryAccess::defs_begin() const {}

inline memoryaccess_def_iterator MemoryAccess::defs_end() {}

inline const_memoryaccess_def_iterator MemoryAccess::defs_end() const {}
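
// A minimal sketch of iterating an access's defining accesses (assumes `MA`
// comes from client code): a MemoryUseOrDef yields its single defining access,
// a MemoryPhi yields one access per incoming edge.
//   for (MemoryAccess *Def : make_range(MA->defs_begin(), MA->defs_end())) {
//     // ... each Def is a reaching MemoryDef, MemoryPhi, or liveOnEntry ...
//   }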

/// GraphTraits for a MemoryAccess, which walks defs in the normal case,
/// and uses in the inverse case.
template <> struct GraphTraits<MemoryAccess *> {};

template <> struct GraphTraits<Inverse<MemoryAccess *>> {};

/// Provide an iterator that walks defs, giving both the memory access,
/// and the current pointer location, updating the pointer location as it
/// changes due to phi node translation.
///
/// This iterator, while somewhat specialized, is what most clients actually
/// want when walking upwards through MemorySSA def chains. It takes a pair of
/// <MemoryAccess,MemoryLocation>, and walks defs, properly translating the
/// memory location through phi nodes for the user.
class upward_defs_iterator
    : public iterator_facade_base<upward_defs_iterator,
                                  std::forward_iterator_tag,
                                  const MemoryAccessPair> {};

inline upward_defs_iterator
upward_defs_begin(const MemoryAccessPair &Pair, DominatorTree &DT) {}

inline upward_defs_iterator upward_defs_end() {}

inline iterator_range<upward_defs_iterator>
upward_defs(const MemoryAccessPair &Pair, DominatorTree &DT) {}
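
// A minimal sketch of an upward walk with phi translation (assumes `Start`,
// `Load`, and `DT` come from client code):
//   MemoryLocation Loc = MemoryLocation::get(Load);
//   for (const MemoryAccessPair &P : upward_defs({Start, Loc}, DT)) {
//     MemoryAccess *MA = P.first;                 // the reaching access
//     const MemoryLocation &TransLoc = P.second;  // location after translation
//   }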

/// Walks the defining accesses of MemoryDefs. Stops after we hit something that
/// has no defining use (e.g. a MemoryPhi or liveOnEntry). Note that, when
/// comparing against a null def_chain_iterator, this will compare equal only
/// after walking said Phi/liveOnEntry.
///
/// The UseOptimizedChain flag specifies whether to walk the clobbering
/// access chain, or all the accesses.
///
/// Normally, MemoryDefs are all just def/use linked together, so a def_chain
/// on a MemoryDef will walk all MemoryDefs above it in the program until it
/// hits a phi node.  The optimized chain walks the clobbering accesses of a
/// store, so if you are just trying to find, given a store, the next thing
/// that would clobber the same memory, you want the optimized chain.
template <class T, bool UseOptimizedChain = false>
struct def_chain_iterator
    : public iterator_facade_base<def_chain_iterator<T, UseOptimizedChain>,
                                  std::forward_iterator_tag, MemoryAccess *> {};

template <class T>
inline iterator_range<def_chain_iterator<T>>
def_chain(T MA, MemoryAccess *UpTo = nullptr) {}

template <class T>
inline iterator_range<def_chain_iterator<T, true>> optimized_def_chain(T MA) {}
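
// A minimal sketch of walking a def chain (assumes `MSSA` and `Store` come
// from client code):
//   MemoryAccess *Start = MSSA.getMemoryAccess(Store);
//   for (MemoryAccess *MA : def_chain(Start)) {
//     // Visits Store's MemoryDef, then each def above it, stopping at the
//     // first MemoryPhi or the liveOnEntry def.
//   }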

} // end namespace llvm

#endif // LLVM_ANALYSIS_MEMORYSSA_H