// llvm/mlir/include/mlir/ExecutionEngine/MemRefUtils.h

//===- MemRefUtils.h - Memref helpers to invoke MLIR JIT code ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Utils for MLIR ABI interfacing with frameworks.
//
// The templated free functions below make it possible to allocate dense
// contiguous buffers with shapes that interoperate properly with the MLIR
// codegen ABI.
//
//===----------------------------------------------------------------------===//

#include "mlir/ExecutionEngine/CRunnerUtils.h"
#include "mlir/Support/LLVM.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"

#include "llvm/Support/raw_ostream.h"

#include <algorithm>
#include <array>
#include <cassert>
#include <climits>
#include <functional>
#include <initializer_list>
#include <memory>
#include <optional>

#ifndef MLIR_EXECUTIONENGINE_MEMREFUTILS_H_
#define MLIR_EXECUTIONENGINE_MEMREFUTILS_H_

namespace mlir {
AllocFunType;

namespace detail {

/// Given a shape with sizes greater than 0 along all dimensions, returns the
/// distance, in number of elements, between a slice in a dimension and the next
/// slice in the same dimension.
///    e.g. shape[3, 4, 5] -> strides[20, 5, 1]
template <size_t N>
inline std::array<int64_t, N> makeStrides(ArrayRef<int64_t> shape) {
  assert(shape.size() == N && "expect shape specification to match rank");
  std::array<int64_t, N> res;
  // Walk dimensions from innermost to outermost: the innermost stride is 1
  // and each outer stride is the product of all inner sizes (row-major).
  int64_t running = 1;
  for (int64_t idx = N - 1; idx >= 0; --idx) {
    assert(shape[idx] && "size must be non-negative for all shape dimensions");
    res[idx] = running;
    running *= shape[idx];
  }
  return res;
}

/// Build a `StridedMemRefDescriptor<T, N>` that matches the MLIR ABI.
/// This is an implementation detail that is kept in sync with MLIR codegen
/// conventions.  Additionally takes a `shapeAlloc` array which
/// is used instead of `shape` to allocate "more aligned" data and compute the
/// corresponding strides.
template <int N, typename T>
typename std::enable_if<(N >= 1), StridedMemRefType<T, N>>::type
makeStridedMemRefDescriptor(T *ptr, T *alignedPtr, ArrayRef<int64_t> shape,
                            ArrayRef<int64_t> shapeAlloc) {
  assert(shape.size() == N && "expect shape specification to match rank");
  assert(shapeAlloc.size() == N &&
         "expect shapeAlloc specification to match rank");
  StridedMemRefType<T, N> descriptor;
  descriptor.basePtr = static_cast<T *>(ptr);
  descriptor.data = static_cast<T *>(alignedPtr);
  descriptor.offset = 0;
  std::copy(shape.begin(), shape.end(), descriptor.sizes);
  // Strides are derived from the *allocation* shape so that padded buffers
  // keep slices properly spaced while sizes still report the logical shape.
  auto strides = makeStrides<N>(shapeAlloc);
  std::copy(strides.begin(), strides.end(), descriptor.strides);
  return descriptor;
}

/// Build a `StridedMemRefDescriptor<T, 0>` that matches the MLIR ABI.
/// This is an implementation detail that is kept in sync with MLIR codegen
/// conventions.  Additionally takes a `shapeAlloc` array which
/// is used instead of `shape` to allocate "more aligned" data and compute the
/// corresponding strides.
template <int N, typename T>
typename std::enable_if<(N == 0), StridedMemRefType<T, 0>>::type
makeStridedMemRefDescriptor(T *ptr, T *alignedPtr, ArrayRef<int64_t> shape = {}

/// Align `nElements` of type T with an optional `alignment`.
/// This replaces a portable `posix_memalign`.
/// `alignment` must be a power of 2 and greater than the size of T. By default
/// the alignment is sizeof(T).
template <typename T>
std::pair<T *, T *>
allocAligned(size_t nElements, AllocFunType allocFun = &::malloc,
             std::optional<uint64_t> alignment = std::optional<uint64_t>()) {}

} // namespace detail

//===----------------------------------------------------------------------===//
// Public API
//===----------------------------------------------------------------------===//

/// Convenient callback to "visit" a memref element by element.
/// This takes a reference to an individual element as well as the coordinates.
/// It can be used in conjuction with a StridedMemrefIterator.
ElementWiseVisitor;

/// Owning MemRef type that abstracts over the runtime type for ranked strided
/// memref.
template <typename T, unsigned Rank>
class OwningMemRef {};

} // namespace mlir

#endif // MLIR_EXECUTIONENGINE_MEMREFUTILS_H_