
//===-- NVPTXLowerArgs.cpp - Lower arguments ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//
// Arguments to kernel and device functions are passed via param space,
// which imposes certain restrictions:
// http://docs.nvidia.com/cuda/parallel-thread-execution/#state-spaces
//
// Kernel parameters are read-only and accessible only via ld.param
// instruction, directly or via a pointer.
//
// Device function parameters are directly accessible via
// ld.param/st.param, but taking the address of one returns a pointer
// to a copy created in local space which *can't* be used with
// ld.param/st.param.
//
// Copying a byval struct into local memory in IR allows us to enforce
// the param space restrictions, gives the rest of IR a pointer w/o
// param space restrictions, and gives us an opportunity to eliminate
// the copy.
//
// Pointer arguments to kernel functions need more work to be lowered:
//
// 1. Convert non-byval pointer arguments of CUDA kernels to pointers in the
//    global address space. This allows later optimizations to emit
//    ld.global.*/st.global.* for accessing these pointer arguments. For
//    example,
//
//    define void @foo(float* %input) {
//      %v = load float, float* %input, align 4
//      ...
//    }
//
//    becomes
//
//    define void @foo(float* %input) {
//      %input2 = addrspacecast float* %input to float addrspace(1)*
//      %input3 = addrspacecast float addrspace(1)* %input2 to float*
//      %v = load float, float* %input3, align 4
//      ...
//    }
//
//    Later, NVPTXInferAddressSpaces will optimize it to
//
//    define void @foo(float* %input) {
//      %input2 = addrspacecast float* %input to float addrspace(1)*
//      %v = load float, float addrspace(1)* %input2, align 4
//      ...
//    }
//
// 2. Convert byval kernel parameters to pointers in the param address space
//    (so that NVPTX emits ld/st.param).  Convert pointers *within* a byval
//    kernel parameter to pointers in the global address space. This allows
//    NVPTX to emit ld/st.global.
//
//    struct S {
//      int *x;
//      int *y;
//    };
//    __global__ void foo(S s) {
//      int *b = s.y;
//      // use b
//    }
//
//    "b" points to the global address space. In the IR level,
//
//    define void @foo(ptr byval %input) {
//      %b_ptr = getelementptr {ptr, ptr}, ptr %input, i64 0, i32 1
//      %b = load ptr, ptr %b_ptr
//      ; use %b
//    }
//
//    becomes
//
//    define void @foo(ptr byval %input) {
//      %b_param = addrspacecast ptr %input to ptr addrspace(101)
//      %b_ptr = getelementptr {ptr, ptr}, ptr addrspace(101) %b_param, i64 0, i32 1
//      %b = load ptr, ptr addrspace(101) %b_ptr
//      %b_global = addrspacecast ptr %b to ptr addrspace(1)
//      ; use %b_global
//    }
//
//    Create a local copy of kernel byval parameters used in a way that *might*
//    mutate the parameter, by storing it in an alloca. Mutations to
//    "grid_constant" parameters are undefined behaviour, and don't require
//    local copies.
//
//    define void @foo(ptr byval(%struct.s) align 4 %input) {
//       store i32 42, ptr %input
//       ret void
//    }
//
//    becomes
//
//    define void @foo(ptr byval(%struct.s) align 4 %input) #1 {
//      %input1 = alloca %struct.s, align 4
//      %input2 = addrspacecast ptr %input to ptr addrspace(101)
//      %input3 = load %struct.s, ptr addrspace(101) %input2, align 4
//      store %struct.s %input3, ptr %input1, align 4
//      store i32 42, ptr %input1, align 4
//      ret void
//    }
//
//    If %input is passed to a device function or written to memory, we
//    conservatively assume that %input gets mutated, and create a local copy.
//
//    Convert param pointers to grid_constant byval kernel parameters that are
//    passed into calls (device functions, intrinsics, inline asm), or otherwise
//    "escape" (into stores/ptrtoints), to the generic address space, using the
//    `nvvm.ptr.param.to.gen` intrinsic, so that NVPTX emits cvta.param
//    (available on sm_70+).
//
//    define void @foo(ptr byval(%struct.s) %input) {
//      ; %input is a grid_constant
//      %call = call i32 @escape(ptr %input)
//      ret void
//    }
//
//    becomes
//
//    define void @foo(ptr byval(%struct.s) %input) {
//      %input1 = addrspacecast ptr %input to ptr addrspace(101)
//      ; the following intrinsic converts the param pointer to generic. We
//      ; don't use an addrspacecast, to prevent a generic -> param -> generic
//      ; cast pair from being cancelled out.
//      %input1.gen = call ptr @llvm.nvvm.ptr.param.to.gen.p0.p101(ptr addrspace(101) %input1)
//      %call = call i32 @escape(ptr %input1.gen)
//      ret void
//    }
//
// TODO: merge this pass with NVPTXInferAddressSpaces so that other passes don't
// cancel the addrspacecast pair this pass emits.
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/NVPTXBaseInfo.h"
#include "NVPTX.h"
#include "NVPTXTargetMachine.h"
#include "NVPTXUtilities.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/PtrUseVisitor.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsNVPTX.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include <numeric>
#include <queue>

#define DEBUG_TYPE "nvptx-lower-args"

using namespace llvm;

namespace llvm {
void initializeNVPTXLowerArgsPass(PassRegistry &);
}

namespace {
class NVPTXLowerArgs : public FunctionPass {
  bool runOnFunction(Function &F) override;

  bool runOnKernelFunction(const NVPTXTargetMachine &TM, Function &F);
  bool runOnDeviceFunction(const NVPTXTargetMachine &TM, Function &F);

  // Handle byval parameters.
  void handleByValParam(const NVPTXTargetMachine &TM, Argument *Arg);
  // Knowing Ptr must point to the global address space, this function inserts
  // an addrspacecast from generic to global and back, so that
  // NVPTXInferAddressSpaces can later fold the global-to-generic cast into
  // loads and stores.
  void markPointerAsGlobal(Value *Ptr);

public:
  static char ID; // Pass identification, replacement for typeid
  NVPTXLowerArgs() : FunctionPass(ID) {}
  StringRef getPassName() const override {
    return "Lower pointer arguments of CUDA kernels";
  }
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetPassConfig>();
  }
};
} // namespace

char NVPTXLowerArgs::ID = 0;

INITIALIZE_PASS_BEGIN(NVPTXLowerArgs, "nvptx-lower-args",
                      "Lower arguments (NVPTX)", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(NVPTXLowerArgs, "nvptx-lower-args",
                    "Lower arguments (NVPTX)", false, false)

// =============================================================================
// If the function had a byval struct ptr arg, say foo(%struct.x* byval %d),
// and we can't guarantee that the only accesses are loads,
// then add the following instructions to the first basic block:
//
// %temp = alloca %struct.x, align 8
// %tempd = addrspacecast %struct.x* %d to %struct.x addrspace(101)*
// %tv = load %struct.x addrspace(101)* %tempd
// store %struct.x %tv, %struct.x* %temp, align 8
//
// The above code allocates some space in the stack and copies the incoming
// struct from param space to local space.
// Then replace all occurrences of %d by %temp.
//
// If instead we know that all users are GEPs or loads, we replace them with
// equivalent instructions in the parameter AS, so they can be accessed with
// ld.param.
// =============================================================================

// For loads, replaces the \p OldUse of the pointer with a Use of the same
// pointer in the parameter AS. For "escapes" (to memory, a function call, or a
// ptrtoint), casts the pointer to generic using cvta.param.
static void convertToParamAS(Use *OldUse, Value *Param, bool HasCvtaParam,
                             bool IsGridConstant) {
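  // What follows is a minimal sketch, assuming the load-only case: walk the
  // GEP/load users of the argument and re-point them at the param-space
  // pointer, so the loads can be selected as ld.param. The full upstream
  // implementation also handles bitcasts, memcpy, and the cvta.param "escape"
  // path gated by HasCvtaParam/IsGridConstant, all of which this sketch
  // deliberately omits.
  (void)HasCvtaParam;
  (void)IsGridConstant;

  Instruction *I = dyn_cast<Instruction>(OldUse->getUser());
  assert(I && "OldUse must be used by an instruction");
  struct IP {
    Use *OldUse;
    Instruction *OldInstruction;
    Value *NewParam;
  };
  SmallVector<IP> ItemsToConvert = {{OldUse, I, Param}};
  SmallVector<Instruction *> InstructionsToDelete;

  // Clone one GEP into the param AS, or re-point one load at the param-AS
  // pointer. Returns the replacement value, or the instruction itself if it
  // was updated in place.
  auto CloneInstInParamAS = [](const IP &I) -> Value * {
    if (auto *LI = dyn_cast<LoadInst>(I.OldInstruction)) {
      LI->setOperand(0, I.NewParam);
      return LI;
    }
    if (auto *GEP = dyn_cast<GetElementPtrInst>(I.OldInstruction)) {
      SmallVector<Value *, 4> Indices(GEP->indices());
      auto *NewGEP = GetElementPtrInst::Create(GEP->getSourceElementType(),
                                               I.NewParam, Indices,
                                               GEP->getName(), GEP);
      NewGEP->setIsInBounds(GEP->isInBounds());
      return NewGEP;
    }
    llvm_unreachable("this sketch only supports GEPs and loads");
  };

  while (!ItemsToConvert.empty()) {
    IP I = ItemsToConvert.pop_back_val();
    Value *NewInst = CloneInstInParamAS(I);
    if (NewInst && NewInst != I.OldInstruction) {
      // The old instruction was cloned; queue its users for conversion and
      // schedule it for deletion once they no longer refer to it.
      for (Use &U : I.OldInstruction->uses())
        ItemsToConvert.push_back({&U, cast<Instruction>(U.getUser()), NewInst});
      InstructionsToDelete.push_back(I.OldInstruction);
    }
  }
  // Delete in reverse order so users are erased before their operands.
  for (Instruction *I : llvm::reverse(InstructionsToDelete))
    I->eraseFromParent();
}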

// Adjust the alignment of arguments passed byval in the .param address space.
// We can increase the alignment of such arguments in a way that ensures we can
// effectively vectorize their loads. We should also traverse all loads from
// the byval pointer and adjust their alignment, if they use a known offset.
// Such alignment changes must be kept consistent with the parameter stores and
// loads in NVPTXTargetLowering::LowerCall.
static void adjustByValArgAlignment(Argument *Arg, Value *ArgInParamAS,
                                    const NVPTXTargetLowering *TLI) {
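  // A minimal sketch: raise the parameter's alignment to the value the NVPTX
  // call lowering will use for the .param slot, so that loads from the byval
  // pointer can be vectorized. The upstream version additionally walks the
  // GEP/load users reachable from ArgInParamAS and bumps each load's
  // alignment based on its known offset from the parameter base; that part is
  // omitted here.
  Function *Func = Arg->getParent();
  MaybeAlign CurrentAlign = Arg->getParamAlign();
  Align NewAlign = TLI->getFunctionParamOptimizedAlign(
      Func, Arg->getParamByValType(), Func->getParent()->getDataLayout());
  if (CurrentAlign && *CurrentAlign >= NewAlign)
    return; // Already aligned at least as strictly as we can justify.

  Arg->removeAttr(Attribute::Alignment);
  Arg->addAttr(Attribute::getWithAlignment(Arg->getContext(), NewAlign));
  (void)ArgInParamAS; // Only used by the omitted per-load realignment.
}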

namespace {
struct ArgUseChecker : PtrUseVisitor<ArgUseChecker> {
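  // A minimal sketch of the use classifier, assuming only the store and
  // ptrtoint cases matter. The upstream checker also tracks phis/selects and
  // memory intrinsics to decide whether the byval argument needs a local copy
  // and whether its pointer escapes.
  using Base = PtrUseVisitor<ArgUseChecker>;

  bool IsGridConstant;

  ArgUseChecker(const DataLayout &DL, bool IsGridConstant)
      : PtrUseVisitor(DL), IsGridConstant(IsGridConstant) {}

  void visitStoreInst(StoreInst &SI) {
    // Storing the pointer itself (not through it) lets it escape.
    if (U->get() == SI.getValueOperand())
      return PI.setEscapedAndAborted(&SI);
    // Writes through the pointer mutate the parameter and force a local copy,
    // except for grid_constant parameters, where mutation is UB.
    if (!IsGridConstant)
      PI.setAborted(&SI);
  }

  void visitPtrToIntInst(PtrToIntInst &I) {
    // ptrtoint may let the pointer escape in ways we can't track.
    PI.setEscapedAndAborted(&I);
  }
}; // struct ArgUseChecker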
} // namespace

void NVPTXLowerArgs::handleByValParam(const NVPTXTargetMachine &TM,
                                      Argument *Arg) {
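  // A minimal sketch, assuming the conservative path only: always copy the
  // byval parameter into a local alloca, as described in the comment block
  // above. The upstream implementation first runs ArgUseChecker, keeps
  // read-only parameters in param space via convertToParamAS, and handles
  // grid_constant escapes with cvta.param; none of that is shown here.
  Function *Func = Arg->getParent();
  Type *StructType = Arg->getParamByValType();
  assert(StructType && "byval parameter should have an explicit byval type");

  const DataLayout &DL = Func->getParent()->getDataLayout();
  Instruction *FirstInst = &*Func->getEntryBlock().begin();

  AllocaInst *AllocA = new AllocaInst(StructType, DL.getAllocaAddrSpace(),
                                      Arg->getName(), FirstInst);
  // Later loads/stores assume the alloca is at least as aligned as the
  // incoming parameter.
  AllocA->setAlignment(
      Arg->getParamAlign().value_or(DL.getPrefTypeAlign(StructType)));
  Arg->replaceAllUsesWith(AllocA);

  // Copy the incoming struct from param space into the local alloca.
  Value *ArgInParam = new AddrSpaceCastInst(
      Arg, PointerType::get(Arg->getContext(), ADDRESS_SPACE_PARAM),
      Arg->getName(), FirstInst);
  LoadInst *LI = new LoadInst(StructType, ArgInParam, Arg->getName(),
                              /*isVolatile=*/false, AllocA->getAlign(),
                              FirstInst);
  new StoreInst(LI, AllocA, FirstInst);
  (void)TM; // Only needed by the omitted grid_constant/cvta.param path.
}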

void NVPTXLowerArgs::markPointerAsGlobal(Value *Ptr) {
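  // A minimal sketch: emit the generic -> global -> generic addrspacecast
  // pair described in the file header, so NVPTXInferAddressSpaces can later
  // fold the global-to-generic cast into loads and stores.
  if (Ptr->getType()->getPointerAddressSpace() != ADDRESS_SPACE_GENERIC)
    return;

  BasicBlock::iterator InsertPt;
  if (auto *Arg = dyn_cast<Argument>(Ptr)) {
    // For arguments, insert at the start of the entry block.
    InsertPt = Arg->getParent()->getEntryBlock().begin();
  } else {
    // Otherwise, insert right after the instruction that defines Ptr.
    InsertPt = std::next(cast<Instruction>(Ptr)->getIterator());
  }

  Instruction *PtrInGlobal = new AddrSpaceCastInst(
      Ptr, PointerType::get(Ptr->getContext(), ADDRESS_SPACE_GLOBAL),
      Ptr->getName(), &*InsertPt);
  Value *PtrInGeneric = new AddrSpaceCastInst(PtrInGlobal, Ptr->getType(),
                                              Ptr->getName(), &*InsertPt);
  // Replace all uses of Ptr with PtrInGeneric, except the cast we just made.
  Ptr->replaceAllUsesWith(PtrInGeneric);
  PtrInGlobal->setOperand(0, Ptr);
}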

// =============================================================================
// Main function for this pass.
// =============================================================================
bool NVPTXLowerArgs::runOnKernelFunction(const NVPTXTargetMachine &TM,
                                         Function &F) {
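  // A minimal sketch: byval pointer args get the param-space treatment, and
  // other pointer args are marked global (CUDA only). The upstream version
  // additionally marks pointers *loaded from* byval parameters as global,
  // including ones that SROA has turned into integer loads + inttoptr.
  LLVM_DEBUG(dbgs() << "Lowering kernel args of " << F.getName() << "\n");
  for (Argument &Arg : F.args()) {
    if (!Arg.getType()->isPointerTy())
      continue;
    if (Arg.hasByValAttr())
      handleByValParam(TM, &Arg);
    else if (TM.getDrvInterface() == NVPTX::CUDA)
      markPointerAsGlobal(&Arg);
  }
  return true;
}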

// Device functions only need to copy byval args into local memory.
bool NVPTXLowerArgs::runOnDeviceFunction(const NVPTXTargetMachine &TM,
                                         Function &F) {
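  // A straightforward sketch: on the device-function path, only byval pointer
  // arguments need handling.
  LLVM_DEBUG(dbgs() << "Lowering function args of " << F.getName() << "\n");
  for (Argument &Arg : F.args())
    if (Arg.getType()->isPointerTy() && Arg.hasByValAttr())
      handleByValParam(TM, &Arg);
  return true;
}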

bool NVPTXLowerArgs::runOnFunction(Function &F) {
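  // A minimal sketch: dispatch on whether F is a kernel entry point.
  auto &TM = getAnalysis<TargetPassConfig>().getTM<NVPTXTargetMachine>();
  return isKernelFunction(F) ? runOnKernelFunction(TM, F)
                             : runOnDeviceFunction(TM, F);
}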

FunctionPass *llvm::createNVPTXLowerArgsPass() { return new NVPTXLowerArgs(); }