llvm/lib/Transforms/Scalar/SROA.cpp

//===- SROA.cpp - Scalar Replacement Of Aggregates ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This transformation implements the well known scalar replacement of
/// aggregates transformation. It tries to identify promotable elements of an
/// aggregate alloca, and promote them to registers. It will also try to
/// convert uses of an element (or set of elements) of an alloca into a vector
/// or bitfield-style integer scalar if appropriate.
///
/// It works to do this with minimal slicing of the alloca so that regions
/// which are merely transferred in and out of external memory remain unchanged
/// and are not decomposed to scalar code.
///
/// Because this also performs alloca promotion, it can be thought of as also
/// serving the purpose of SSA formation. The algorithm iterates on the
/// function until all opportunities for promotion have been realized.
///
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/SROA.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/PtrUseVisitor.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantFolder.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <string>
#include <tuple>
#include <utility>
#include <variant>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "sroa"

STATISTIC(NumAllocasAnalyzed, "Number of allocas analyzed for replacement");
STATISTIC(NumAllocaPartitions, "Number of alloca partitions formed");
STATISTIC(MaxPartitionsPerAlloca, "Maximum number of partitions per alloca");
STATISTIC(NumAllocaPartitionUses, "Number of alloca partition uses rewritten");
STATISTIC(MaxUsesPerAllocaPartition, "Maximum number of uses of a partition");
STATISTIC(NumNewAllocas, "Number of new, smaller allocas introduced");
STATISTIC(NumPromoted, "Number of allocas promoted to SSA values");
STATISTIC(NumLoadsSpeculated, "Number of loads speculated to allow promotion");
STATISTIC(NumLoadsPredicated,
          "Number of loads rewritten into predicated loads to allow promotion");
STATISTIC(
    NumStoresPredicated,
    "Number of stores rewritten into predicated stores to allow promotion");
STATISTIC(NumDeleted, "Number of instructions deleted");
STATISTIC(NumVectorized, "Number of vectorized aggregates");

/// Disable running mem2reg during SROA in order to test or debug SROA.
static cl::opt<bool> SROASkipMem2Reg("sroa-skip-mem2reg", cl::init(false),
                                     cl::Hidden);
namespace {

class AllocaSliceRewriter;
class AllocaSlices;
class Partition;

class SelectHandSpeculativity {};
static_assert(sizeof(SelectHandSpeculativity) == sizeof(unsigned char));

using PossiblySpeculatableLoad =
    PointerIntPair<LoadInst *, 2, SelectHandSpeculativity>;
using UnspeculatableStore = StoreInst *;
using RewriteableMemOp =
    std::variant<PossiblySpeculatableLoad, UnspeculatableStore>;
using RewriteableMemOps = SmallVector<RewriteableMemOp, 2>;

/// An optimization pass providing Scalar Replacement of Aggregates.
///
/// This pass takes allocations which can be completely analyzed (that is, they
/// don't escape) and tries to turn them into scalar SSA values. There are
/// a few steps to this process.
///
/// 1) It takes allocations of aggregates and analyzes the ways in which they
///    are used to try to split them into smaller allocations, ideally of
///    a single scalar data type. It will split up memcpy and memset accesses
///    as necessary and try to isolate individual scalar accesses.
/// 2) It will transform accesses into forms which are suitable for SSA value
///    promotion. This can be replacing a memset with a scalar store of an
///    integer value, or it can involve speculating operations on a PHI or
///    select to be a PHI or select of the results.
/// 3) Finally, this will try to detect a pattern of accesses which map cleanly
///    onto insert and extract operations on a vector value, and convert them to
///    this form. By doing so, it will enable promotion of vector aggregates to
///    SSA vector values.
class SROA {};
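
// For illustration only (this example is not part of the upstream comment):
// given a non-escaping aggregate alloca such as
//
//   %pair = alloca { i32, i32 }
//   store i32 1, ptr %pair
//   %gep  = getelementptr inbounds { i32, i32 }, ptr %pair, i32 0, i32 1
//   store i32 2, ptr %gep
//   %v    = load i32, ptr %gep
//
// step (1) splits %pair into two independent i32 allocas, and promotion then
// replaces %v with the SSA value `i32 2`, leaving no memory traffic behind.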

} // end anonymous namespace

/// Calculate the fragment of a variable to use when slicing a store
/// based on the slice dimensions, existing fragment, and base storage
/// fragment.
/// Results:
/// UseFrag - Use Target as the new fragment.
/// UseNoFrag - The new slice already covers the whole variable.
/// Skip - The new alloca slice doesn't include this variable.
/// FIXME: Can we use calculateFragmentIntersect instead?
namespace {
enum FragCalcResult { UseFrag, UseNoFrag, Skip };
}
static FragCalcResult
calculateFragment(DILocalVariable *Variable,
                  uint64_t NewStorageSliceOffsetInBits,
                  uint64_t NewStorageSliceSizeInBits,
                  std::optional<DIExpression::FragmentInfo> StorageFragment,
                  std::optional<DIExpression::FragmentInfo> CurrentFragment,
                  DIExpression::FragmentInfo &Target) {}
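
// Worked example with illustrative numbers (not from the source): for a
// 64-bit variable whose base storage fragment covers bits [0, 64):
//   * a new slice covering bits [0, 32)    -> UseFrag, Target = (offset 0, size 32)
//   * a new slice covering bits [0, 64)    -> UseNoFrag (covers the whole variable)
//   * a new slice entirely outside [0, 64) -> Skip (this variable isn't touched)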

static DebugVariable getAggregateVariable(DbgVariableIntrinsic *DVI) {}
static DebugVariable getAggregateVariable(DbgVariableRecord *DVR) {}

/// Helpers for handling new and old debug info modes in migrateDebugInfo.
/// These overloads unwrap a DbgInstPtr {Instruction* | DbgRecord*} union based
/// on the \p Unused parameter type.
DbgVariableRecord *UnwrapDbgInstPtr(DbgInstPtr P, DbgVariableRecord *Unused) {}
DbgAssignIntrinsic *UnwrapDbgInstPtr(DbgInstPtr P, DbgAssignIntrinsic *Unused) {}

/// Find linked dbg.assign and generate a new one with the correct
/// FragmentInfo. Link Inst to the new dbg.assign.  If Value is nullptr the
/// value component is copied from the old dbg.assign to the new.
/// \param OldAlloca             Alloca for the variable before splitting.
/// \param IsSplit               True if the store (not necessarily alloca)
///                              is being split.
/// \param OldAllocaOffsetInBits Offset of the slice taken from OldAlloca.
/// \param SliceSizeInBits       New number of bits being written to.
/// \param OldInst               Instruction that is being split.
/// \param Inst                  New instruction performing this part of the
///                              split store.
/// \param Dest                  Store destination.
/// \param Value                 Stored value.
/// \param DL                    Datalayout.
static void migrateDebugInfo(AllocaInst *OldAlloca, bool IsSplit,
                             uint64_t OldAllocaOffsetInBits,
                             uint64_t SliceSizeInBits, Instruction *OldInst,
                             Instruction *Inst, Value *Dest, Value *Value,
                             const DataLayout &DL) {}

namespace {

/// A custom IRBuilder inserter which prefixes all names, but only in
/// Assert builds.
class IRBuilderPrefixedInserter final : public IRBuilderDefaultInserter {};

/// Provide a type for IRBuilder that drops names in release builds.
using IRBuilderTy = IRBuilder<ConstantFolder, IRBuilderPrefixedInserter>;

/// A used slice of an alloca.
///
/// This structure represents a slice of an alloca used by some instruction. It
/// stores both the begin and end offsets of this use, a pointer to the use
/// itself, and a flag indicating whether we can classify the use as splittable
/// or not when forming partitions of the alloca.
class Slice {};
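
// Illustrative example (not from the source): for `%a = alloca i64`, a
// `store i32 0, ptr %a` produces the unsplittable slice [0, 4), while an
// 8-byte memset of %a produces the splittable slice [0, 8); each slice keeps
// a pointer to the Use that created it.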

/// Representation of the alloca slices.
///
/// This class represents the slices of an alloca which are formed by its
/// various uses. If a pointer escapes, we can't fully build a representation
/// for the slices used and we reflect that in this structure. The uses are
/// stored, sorted by increasing beginning offset and with unsplittable slices
/// starting at a particular offset before splittable slices.
class AllocaSlices {};

/// A partition of the slices.
///
/// An ephemeral representation for a range of slices which can be viewed as
/// a partition of the alloca. This range represents a span of the alloca's
/// memory which cannot be split, and provides access to all of the slices
/// overlapping some part of the partition.
///
/// Objects of this type are produced by traversing the alloca's slices, but
/// are only ephemeral and not persistent.
class Partition {};
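
// Continuing the illustrative example above: the unsplittable store slice
// [0, 4) seeds a partition spanning bytes [0, 4), with the splittable memset
// slice [0, 8) recorded as an overlapping split slice; the remaining bytes
// [4, 8) then form a second partition covered only by that split slice.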

} // end anonymous namespace

/// An iterator over partitions of the alloca's slices.
///
/// This iterator implements the core algorithm for partitioning the alloca's
/// slices. It is a forward iterator as we don't support backtracking for
/// efficiency reasons, and re-use a single storage area to maintain the
/// current set of split slices.
///
/// It is templated on the slice iterator type to use so that it can operate
/// with either const or non-const slice iterators.
class AllocaSlices::partition_iterator
    : public iterator_facade_base<partition_iterator, std::forward_iterator_tag,
                                  Partition> {};
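
// A minimal sketch of the iterator_facade_base pattern used above
// (illustrative only; `CountingIterator` is a hypothetical type, not part of
// this file). Supplying ==, * and pre-increment is enough for the base class
// to synthesize !=, post-increment and -> for a forward iterator:
//
//   class CountingIterator
//       : public iterator_facade_base<CountingIterator,
//                                     std::forward_iterator_tag, const int> {
//     int Value = 0;
//
//   public:
//     CountingIterator() = default;
//     explicit CountingIterator(int V) : Value(V) {}
//     bool operator==(const CountingIterator &RHS) const {
//       return Value == RHS.Value;
//     }
//     const int &operator*() const { return Value; }
//     CountingIterator &operator++() {
//       ++Value;
//       return *this;
//     }
//   };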

/// A forward range over the partitions of the alloca's slices.
///
/// This accesses an iterator range over the partitions of the alloca's
/// slices. It computes these partitions on the fly based on the overlapping
/// offsets of the slices and the ability to split them. It will visit "empty"
/// partitions to cover regions of the alloca only accessed via split
/// slices.
iterator_range<AllocaSlices::partition_iterator> AllocaSlices::partitions() {}

static Value *foldSelectInst(SelectInst &SI) {}

/// A helper that folds a PHI node or a select.
static Value *foldPHINodeOrSelectInst(Instruction &I) {}

/// Builder for the alloca slices.
///
/// This class builds a set of alloca slices by recursively visiting the uses
/// of an alloca and making a slice for each load and store at each offset.
class AllocaSlices::SliceBuilder : public PtrUseVisitor<SliceBuilder> {};

AllocaSlices::AllocaSlices(const DataLayout &DL, AllocaInst &AI)
    :{}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)

void AllocaSlices::print(raw_ostream &OS, const_iterator I,
                         StringRef Indent) const {
  printSlice(OS, I, Indent);
  OS << "\n";
  printUse(OS, I, Indent);
}

void AllocaSlices::printSlice(raw_ostream &OS, const_iterator I,
                              StringRef Indent) const {
  OS << Indent << "[" << I->beginOffset() << "," << I->endOffset() << ")"
     << " slice #" << (I - begin())
     << (I->isSplittable() ? " (splittable)" : "");
}

void AllocaSlices::printUse(raw_ostream &OS, const_iterator I,
                            StringRef Indent) const {
  OS << Indent << "  used by: " << *I->getUse()->getUser() << "\n";
}

void AllocaSlices::print(raw_ostream &OS) const {
  if (PointerEscapingInstr) {
    OS << "Can't analyze slices for alloca: " << AI << "\n"
       << "  A pointer to this alloca escaped by:\n"
       << "  " << *PointerEscapingInstr << "\n";
    return;
  }

  OS << "Slices of alloca: " << AI << "\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I)
    print(OS, I);
}

LLVM_DUMP_METHOD void AllocaSlices::dump(const_iterator I) const {
  print(dbgs(), I);
}
LLVM_DUMP_METHOD void AllocaSlices::dump() const { print(dbgs()); }

#endif // !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)

/// Walk the range of a partitioning looking for a common type to cover this
/// sequence of slices.
static std::pair<Type *, IntegerType *>
findCommonType(AllocaSlices::const_iterator B, AllocaSlices::const_iterator E,
               uint64_t EndOffset) {}
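
// Illustrative example (not from the source): if every slice in [B, E) loads
// or stores an i32, the common type is i32. If one slice uses i32 and another
// uses float over the same bytes, no common type is found, but i32 is still
// returned as the widest integer type seen, enabling the bitfield-style
// integer fallback described in the file comment.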

/// PHI instructions that use an alloca and are subsequently loaded can be
/// rewritten to load both input pointers in the pred blocks and then PHI the
/// results, allowing the load of the alloca to be promoted.
/// From this:
///   %P2 = phi [i32* %Alloca, i32* %Other]
///   %V = load i32* %P2
/// to:
///   %V1 = load i32* %Alloca      -> will be mem2reg'd
///   ...
///   %V2 = load i32* %Other
///   ...
///   %V = phi [i32 %V1, i32 %V2]
///
/// We can do this to a select if its only uses are loads and if the operands
/// to the select can be loaded unconditionally.
///
/// FIXME: This should be hoisted into a generic utility, likely in
/// Transforms/Util/Local.h
static bool isSafePHIToSpeculate(PHINode &PN) {}

static void speculatePHINodeLoads(IRBuilderTy &IRB, PHINode &PN) {}

SelectHandSpeculativity &
SelectHandSpeculativity::setAsSpeculatable(bool isTrueVal) {}

bool SelectHandSpeculativity::isSpeculatable(bool isTrueVal) const {}

bool SelectHandSpeculativity::areAllSpeculatable() const {}

bool SelectHandSpeculativity::areAnySpeculatable() const {}
bool SelectHandSpeculativity::areNoneSpeculatable() const {}

static SelectHandSpeculativity
isSafeLoadOfSelectToSpeculate(LoadInst &LI, SelectInst &SI, bool PreserveCFG) {}

std::optional<RewriteableMemOps>
SROA::isSafeSelectToSpeculate(SelectInst &SI, bool PreserveCFG) {}

static void speculateSelectInstLoads(SelectInst &SI, LoadInst &LI,
                                     IRBuilderTy &IRB) {}
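
// Illustrative example (not from the source) of the speculation performed
// here:
//
//   %P = select i1 %c, ptr %Alloca, ptr %Other
//   %V = load i32, ptr %P
// becomes
//   %V1 = load i32, ptr %Alloca    ; will be mem2reg'd
//   %V2 = load i32, ptr %Other
//   %V  = select i1 %c, i32 %V1, i32 %V2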

template <typename T>
static void rewriteMemOpOfSelect(SelectInst &SI, T &I,
                                 SelectHandSpeculativity Spec,
                                 DomTreeUpdater &DTU) {}

static void rewriteMemOpOfSelect(SelectInst &SelInst, Instruction &I,
                                 SelectHandSpeculativity Spec,
                                 DomTreeUpdater &DTU) {}

static bool rewriteSelectInstMemOps(SelectInst &SI,
                                    const RewriteableMemOps &Ops,
                                    IRBuilderTy &IRB, DomTreeUpdater *DTU) {}

/// Compute an adjusted pointer from Ptr by Offset bytes where the
/// resulting pointer has PointerTy.
static Value *getAdjustedPtr(IRBuilderTy &IRB, const DataLayout &DL, Value *Ptr,
                             APInt Offset, Type *PointerTy,
                             const Twine &NamePrefix) {}
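
// Illustrative example (assuming opaque pointers; not from the source):
// adjusting %base by 4 bytes typically materializes as a byte-wise GEP such as
//   %adj = getelementptr inbounds i8, ptr %base, i64 4
// with an addrspacecast emitted only if PointerTy lives in a different
// address space than Ptr.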

/// Compute the adjusted alignment for a load or store from an offset.
static Align getAdjustedAlignment(Instruction *I, uint64_t Offset) {}

/// Test whether we can convert a value from the old to the new type.
///
/// This predicate should be used to guard calls to convertValue in order to
/// ensure that we only try to convert viable values. The strategy is that we
/// will peel off single element struct and array wrappings to get to an
/// underlying value, and convert that value.
static bool canConvertValue(const DataLayout &DL, Type *OldTy, Type *NewTy) {}

/// Generic routine to convert an SSA value to a value of a different
/// type.
///
/// This will try various different casting techniques, such as bitcasts,
/// inttoptr, and ptrtoint casts. Use the \c canConvertValue predicate to test
/// two types for viability with this routine.
static Value *convertValue(const DataLayout &DL, IRBuilderTy &IRB, Value *V,
                           Type *NewTy) {}
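
// Illustrative examples of the casts involved (not an exhaustive list):
//   i32 -> float : bitcast
//   i64 -> ptr   : inttoptr
//   ptr -> i64   : ptrtoint
// All conversions are between types of the same store size, so canConvertValue
// must be consulted first.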

/// Test whether the given slice use can be promoted to a vector.
///
/// This function is called to test each entry in a partition which is slated
/// for a single slice.
static bool isVectorPromotionViableForSlice(Partition &P, const Slice &S,
                                            VectorType *Ty,
                                            uint64_t ElementSize,
                                            const DataLayout &DL) {}

/// Test whether a vector type is viable for promotion.
///
/// This implements the necessary checking for \c checkVectorTypesForPromotion
/// (and thus isVectorPromotionViable) over all slices of the alloca for the
/// given VectorType.
static bool checkVectorTypeForPromotion(Partition &P, VectorType *VTy,
                                        const DataLayout &DL) {}

/// Test whether any vector type in \p CandidateTys is viable for promotion.
///
/// This implements the necessary checking for \c isVectorPromotionViable over
/// all slices of the alloca for the given VectorType.
static VectorType *
checkVectorTypesForPromotion(Partition &P, const DataLayout &DL,
                             SmallVectorImpl<VectorType *> &CandidateTys,
                             bool HaveCommonEltTy, Type *CommonEltTy,
                             bool HaveVecPtrTy, bool HaveCommonVecPtrTy,
                             VectorType *CommonVecPtrTy) {}

static VectorType *createAndCheckVectorTypesForPromotion(
    SetVector<Type *> &OtherTys, ArrayRef<VectorType *> CandidateTysCopy,
    function_ref<void(Type *)> CheckCandidateType, Partition &P,
    const DataLayout &DL, SmallVectorImpl<VectorType *> &CandidateTys,
    bool &HaveCommonEltTy, Type *&CommonEltTy, bool &HaveVecPtrTy,
    bool &HaveCommonVecPtrTy, VectorType *&CommonVecPtrTy) {}

/// Test whether the given alloca partitioning and range of slices can be
/// promoted to a vector.
///
/// This is a quick test to check whether we can rewrite a particular alloca
/// partition (and its newly formed alloca) into a vector alloca with only
/// whole-vector loads and stores such that it could be promoted to a vector
/// SSA value. We can only ensure this for a limited set of operations, and we
/// don't want to do the rewrites unless we are confident that the result will
/// be promotable, so we have an early test here.
static VectorType *isVectorPromotionViable(Partition &P, const DataLayout &DL) {}
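
// Illustrative example (not from the source): a partition whose accesses are
//
//   %a = alloca <4 x float>
//   store <4 x float> %v, ptr %a
//   %gep = getelementptr inbounds float, ptr %a, i64 2
//   %f = load float, ptr %gep
//
// can be rewritten as whole-vector stores plus an extractelement for %f, after
// which the alloca promotes to a single <4 x float> SSA value.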

/// Test whether a slice of an alloca is valid for integer widening.
///
/// This implements the necessary checking for the \c isIntegerWideningViable
/// test below on a single slice of the alloca.
static bool isIntegerWideningViableForSlice(const Slice &S,
                                            uint64_t AllocBeginOffset,
                                            Type *AllocaTy,
                                            const DataLayout &DL,
                                            bool &WholeAllocaOp) {}

/// Test whether the given alloca partition's integer operations can be
/// widened to promotable ones.
///
/// This is a quick test to check whether we can rewrite the integer loads and
/// stores to a particular alloca into wider loads and stores and be able to
/// promote the resulting alloca.
static bool isIntegerWideningViable(Partition &P, Type *AllocaTy,
                                    const DataLayout &DL) {}
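
// Illustrative example (not from the source): for
//
//   %a = alloca i64
//   store i32 %lo, ptr %a
//   %gep = getelementptr inbounds i32, ptr %a, i64 1
//   store i32 %hi, ptr %gep
//   %v = load i64, ptr %a
//
// the two i32 stores can be widened into insertions of bits into a single i64
// (shift/mask/or), after which the alloca promotes to one i64 SSA value.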

static Value *extractInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *V,
                             IntegerType *Ty, uint64_t Offset,
                             const Twine &Name) {}

static Value *insertInteger(const DataLayout &DL, IRBuilderTy &IRB, Value *Old,
                            Value *V, uint64_t Offset, const Twine &Name) {}

static Value *extractVector(IRBuilderTy &IRB, Value *V, unsigned BeginIndex,
                            unsigned EndIndex, const Twine &Name) {}

static Value *insertVector(IRBuilderTy &IRB, Value *Old, Value *V,
                           unsigned BeginIndex, const Twine &Name) {}
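
// Rough sketch of what these helpers emit (illustrative, details elided):
// extractInteger pulls the requested bits out of a wider integer with a shift
// plus trunc, insertInteger places them with zext/shift and a masked or, and
// extractVector/insertVector move a contiguous run of lanes using
// shufflevector or single-element extract/insertelement.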

namespace {

/// Visitor to rewrite instructions using a particular slice of an alloca
/// to use a new alloca.
///
/// Also implements the rewriting to vector-based accesses when the partition
/// passes the isVectorPromotionViable predicate. Most of the rewriting logic
/// lives here.
class AllocaSliceRewriter : public InstVisitor<AllocaSliceRewriter, bool> {};

/// Visitor to rewrite aggregate loads and stores as scalar.
///
/// This pass aggressively rewrites all aggregate loads and stores on
/// a particular pointer (or any pointer derived from it which we can identify)
/// with scalar loads and stores.
class AggLoadStoreRewriter : public InstVisitor<AggLoadStoreRewriter, bool> {};
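
// Illustrative example (not from the source): an aggregate load such as
//   %v = load { i32, float }, ptr %p
// is rewritten as two scalar loads of the element addresses combined with
// insertvalue, and an aggregate store becomes extractvalue plus two scalar
// stores, exposing the pieces to the slice-based rewriting above.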

} // end anonymous namespace

/// Strip aggregate type wrapping.
///
/// This removes no-op aggregate types wrapping an underlying type. It will
/// strip as many layers of types as it can without changing either the type
/// size or the allocated size.
static Type *stripAggregateTypeWrapping(const DataLayout &DL, Type *Ty) {}
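
// Illustrative example (not from the source): both { { float } } and
// [1 x float] strip down to float, since peeling those wrappers changes
// neither the type size nor the allocated size.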

/// Try to find a partition of the aggregate type passed in for a given
/// offset and size.
///
/// This recurses through the aggregate type and tries to compute a subtype
/// based on the offset and size. When the offset and size span a sub-section
/// of an array, it will even compute a new array type for that sub-section,
/// and the same for structs.
///
/// Note that this routine is very strict and tries to find a partition of the
/// type which produces the *exact* right offset and size. It is not forgiving
/// when the size or offset causes either end of the type-based partition to
/// be off.
/// Also, this is a best-effort routine. It is reasonable to give up and not
/// return a type if necessary.
static Type *getTypePartition(const DataLayout &DL, Type *Ty, uint64_t Offset,
                              uint64_t Size) {}
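
// Illustrative example (not from the source): for Ty = [10 x i32], Offset = 8
// and Size = 12 this returns [3 x i32]; for structs it likewise returns the
// member (or sub-struct/sub-array) covering exactly those bytes, or null when
// the bounds don't line up with the type's layout.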

/// Pre-split loads and stores to simplify rewriting.
///
/// We want to break up the splittable load+store pairs as much as
/// possible. This is important to do as a preprocessing step, as once we
/// start rewriting the accesses to partitions of the alloca we lose the
/// necessary information to correctly split apart paired loads and stores
/// which both point into this alloca. The case to consider is something like
/// the following:
///
///   %a = alloca [12 x i8]
///   %gep1 = getelementptr i8, ptr %a, i32 0
///   %gep2 = getelementptr i8, ptr %a, i32 4
///   %gep3 = getelementptr i8, ptr %a, i32 8
///   store float 0.0, ptr %gep1
///   store float 1.0, ptr %gep2
///   %v = load i64, ptr %gep1
///   store i64 %v, ptr %gep2
///   %f1 = load float, ptr %gep2
///   %f2 = load float, ptr %gep3
///
/// Here we want to form 3 partitions of the alloca, each 4 bytes large, and
/// promote everything so we recover the 2 SSA values that should have been
/// there all along.
///
/// \returns true if any changes are made.
bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {}

/// Rewrite an alloca partition's users.
///
/// This routine drives both of the rewriting goals of the SROA pass. It tries
/// to rewrite uses of an alloca partition to be conducive for SSA value
/// promotion. If the partition needs a new, more refined alloca, this will
/// build that new alloca, preserving as much type information as possible, and
/// rewrite the uses of the old alloca to point at the new one and have the
/// appropriate new offsets. It also evaluates how successful the rewrite was
/// at enabling promotion and if it was successful queues the alloca to be
/// promoted.
AllocaInst *SROA::rewritePartition(AllocaInst &AI, AllocaSlices &AS,
                                   Partition &P) {}

// There isn't a shared interface to get the "address" parts out of a
// dbg.declare and dbg.assign, so provide some wrappers now for
// both debug intrinsics and records.
const Value *getAddress(const DbgVariableIntrinsic *DVI) {}

const Value *getAddress(const DbgVariableRecord *DVR) {}

bool isKillAddress(const DbgVariableIntrinsic *DVI) {}

bool isKillAddress(const DbgVariableRecord *DVR) {}

const DIExpression *getAddressExpression(const DbgVariableIntrinsic *DVI) {}

const DIExpression *getAddressExpression(const DbgVariableRecord *DVR) {}

/// Create or replace an existing fragment in a DIExpression with \p Frag.
/// If the expression already contains a DW_OP_LLVM_extract_bits_[sz]ext
/// operation, add \p BitExtractOffset to the offset part.
///
/// Returns the new expression, or nullptr if this fails (see details below).
///
/// This function is similar to DIExpression::createFragmentExpression except
/// for 3 important distinctions:
///   1. The new fragment isn't relative to an existing fragment.
///   2. It assumes the computed location is a memory location. This means we
///      don't need to perform checks that creating the fragment preserves the
///      expression semantics.
///   3. Existing extract_bits are modified independently of fragment changes
///      using \p BitExtractOffset. A change to the fragment offset or size
///      may affect a bit extract. But a bit extract offset can change
///      independently of the fragment dimensions.
///
/// Returns the new expression, or nullptr if one couldn't be created.
/// Ideally this is only used to signal that a bit-extract has become
/// zero-sized (and thus the new debug record has no size and can be
/// dropped), however, it fails for other reasons too - see the FIXME below.
///
/// FIXME: To keep the change that introduces this function NFC it bails
/// in some situations unnecessarily, e.g. when fragment and bit extract
/// sizes differ.
static DIExpression *createOrReplaceFragment(const DIExpression *Expr,
                                             DIExpression::FragmentInfo Frag,
                                             int64_t BitExtractOffset) {}
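
// Background note (illustrative, not from the source): a fragment
// (OffsetInBits, SizeInBits) such as (32, 32) describes bits [32, 64) of the
// source variable and is encoded as a trailing DW_OP_LLVM_fragment op in the
// expression; this function rewrites that trailing op, while any
// DW_OP_LLVM_extract_bits_[sz]ext offset is adjusted separately via
// BitExtractOffset.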

/// Insert a new dbg.declare.
/// \p Orig Original to copy debug loc and variable from.
/// \p NewAddr Location's new base address.
/// \p NewAddrExpr New expression to apply to address.
/// \p BeforeInst Insert position.
/// \p NewFragment New fragment (absolute, non-relative).
/// \p BitExtractAdjustment Offset to apply to any extract_bits op.
static void
insertNewDbgInst(DIBuilder &DIB, DbgDeclareInst *Orig, AllocaInst *NewAddr,
                 DIExpression *NewAddrExpr, Instruction *BeforeInst,
                 std::optional<DIExpression::FragmentInfo> NewFragment,
                 int64_t BitExtractAdjustment) {}

/// Insert a new dbg.assign.
/// \p Orig Original to copy debug loc, variable, value and value expression
///    from.
/// \p NewAddr Location's new base address.
/// \p NewAddrExpr New expression to apply to address.
/// \p BeforeInst Insert position.
/// \p NewFragment New fragment (absolute, non-relative).
/// \p BitExtractAdjustment Offset to apply to any extract_bits op.
static void
insertNewDbgInst(DIBuilder &DIB, DbgAssignIntrinsic *Orig, AllocaInst *NewAddr,
                 DIExpression *NewAddrExpr, Instruction *BeforeInst,
                 std::optional<DIExpression::FragmentInfo> NewFragment,
                 int64_t BitExtractAdjustment) {}

/// Insert a new DbgRecord.
/// \p Orig Original to copy record type, debug loc and variable from, and
///    additionally value and value expression for dbg_assign records.
/// \p NewAddr Location's new base address.
/// \p NewAddrExpr New expression to apply to address.
/// \p BeforeInst Insert position.
/// \p NewFragment New fragment (absolute, non-relative).
/// \p BitExtractAdjustment Offset to apply to any extract_bits op.
static void
insertNewDbgInst(DIBuilder &DIB, DbgVariableRecord *Orig, AllocaInst *NewAddr,
                 DIExpression *NewAddrExpr, Instruction *BeforeInst,
                 std::optional<DIExpression::FragmentInfo> NewFragment,
                 int64_t BitExtractAdjustment) {}

/// Walks the slices of an alloca and form partitions based on them,
/// rewriting each of their uses.
bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &AS) {}

/// Clobber a use with poison, deleting the used value if it becomes dead.
void SROA::clobberUse(Use &U) {}

/// Analyze an alloca for SROA.
///
/// This analyzes the alloca to ensure we can reason about it, builds
/// the slices of the alloca, and then hands it off to be split and
/// rewritten as needed.
std::pair<bool /*Changed*/, bool /*CFGChanged*/>
SROA::runOnAlloca(AllocaInst &AI) {}

/// Delete the dead instructions accumulated in this run.
///
/// Recursively deletes the dead instructions we've accumulated. This is done
/// at the very end to maximize locality of the recursive delete and to
/// minimize the problems of invalidated instruction pointers as such pointers
/// are used heavily in the intermediate stages of the algorithm.
///
/// We also record the alloca instructions deleted here so that they aren't
/// subsequently handed to mem2reg to promote.
bool SROA::deleteDeadInstructions(
    SmallPtrSetImpl<AllocaInst *> &DeletedAllocas) {}

/// Promote the allocas, using the best available technique.
///
/// This attempts to promote whatever allocas have been identified as viable in
/// the PromotableAllocas list. If that list is empty, there is nothing to do.
/// This function returns whether any promotion occurred.
bool SROA::promoteAllocas(Function &F) {}
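
// A minimal sketch of the promotion step (illustrative only; `promoteSketch`
// is a hypothetical helper, not the actual implementation). It assumes the
// candidate allocas already passed isAllocaPromotable, and the real code
// additionally honors -sroa-skip-mem2reg:
//
//   static bool promoteSketch(ArrayRef<AllocaInst *> Allocas,
//                             DominatorTree &DT, AssumptionCache &AC) {
//     if (Allocas.empty())
//       return false;
//     // Rewrites every load/store of the allocas into SSA values and erases
//     // the allocas themselves.
//     PromoteMemToReg(Allocas, DT, &AC);
//     return true;
//   }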

std::pair<bool /*Changed*/, bool /*CFGChanged*/> SROA::runSROA(Function &F) {}

PreservedAnalyses SROAPass::run(Function &F, FunctionAnalysisManager &AM) {}

void SROAPass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {}
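
// Illustrative scheduling of this pass with the new pass manager (not part of
// this file; assumes the usual textual pipeline names "sroa<preserve-cfg>" and
// "sroa<modify-cfg>"):
//
//   FunctionPassManager FPM;
//   FPM.addPass(SROAPass(SROAOptions::PreserveCFG));
//   // or, from the command line: opt -passes='sroa<modify-cfg>' ...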

SROAPass::SROAPass(SROAOptions PreserveCFG) :{}

namespace {

/// A legacy pass for the legacy pass manager that wraps the \c SROA pass.
class SROALegacyPass : public FunctionPass {};

} // end anonymous namespace

char SROALegacyPass::ID = 0;

FunctionPass *llvm::createSROAPass(bool PreserveCFG) {}

INITIALIZE_PASS_BEGIN(SROALegacyPass, "sroa",
                      "Scalar Replacement Of Aggregates", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(SROALegacyPass, "sroa", "Scalar Replacement Of Aggregates",
                    false, false)