//===- SparseTensorRuntime.cpp - SparseTensor runtime support lib ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a light-weight runtime support library for
// manipulating sparse tensors from MLIR. More specifically, it provides
// C-API wrappers so that MLIR-generated code can call into the C++ runtime
// support library. The functionality provided in this library is meant
// to simplify benchmarking, testing, and debugging of MLIR code operating
// on sparse tensors. However, the provided functionality is **not**
// part of core MLIR itself.
//
// The following memory-resident sparse storage schemes are supported:
//
// (a) A coordinate scheme for temporarily storing and lexicographically
//     sorting a sparse tensor by coordinate (SparseTensorCOO).
//
// (b) A "one-size-fits-all" sparse tensor storage scheme defined by
//     per-dimension sparse/dense annotations together with a dimension
//     ordering used by MLIR compiler-generated code (SparseTensorStorage).
//
// The following external formats are supported:
//
// (1) Matrix Market Exchange (MME): *.mtx
//     https://math.nist.gov/MatrixMarket/formats.html
//
// (2) Formidable Repository of Open Sparse Tensors and Tools (FROSTT): *.tns
//     http://frostt.io/tensors/file-formats.html
//
// Two public APIs are supported:
//
// (I) Methods operating on MLIR buffers (memrefs) to interact with sparse
//     tensors. These methods should be used exclusively by MLIR
//     compiler-generated code.
//
// (II) Methods that accept C-style data structures to interact with sparse
//      tensors. These methods can be used by any external runtime that wants
//      to interact with MLIR compiler-generated code.
//
// In both cases (I) and (II), the SparseTensorStorage format is externally
// only visible as an opaque pointer.
//
//===----------------------------------------------------------------------===//

#include "mlir/ExecutionEngine/SparseTensorRuntime.h"

#ifdef MLIR_CRUNNERUTILS_DEFINE_FUNCTIONS

#include "mlir/ExecutionEngine/SparseTensor/ArithmeticUtils.h"
#include "mlir/ExecutionEngine/SparseTensor/COO.h"
#include "mlir/ExecutionEngine/SparseTensor/File.h"
#include "mlir/ExecutionEngine/SparseTensor/Storage.h"

#include <cstring>
#include <numeric>

using namespace mlir::sparse_tensor;

//===----------------------------------------------------------------------===//
//
// Utilities for manipulating `StridedMemRefType`.
//
//===----------------------------------------------------------------------===//

namespace {

#define ASSERT_NO_STRIDE …
#define MEMREF_GET_USIZE …
#define ASSERT_USIZE_EQ …
#define MEMREF_GET_PAYLOAD …

/// Initializes the memref with the provided size and data pointer. This
/// is designed for functions which want to "return" a memref that aliases
/// into memory owned by some other object (e.g., `SparseTensorStorage`),
/// without doing any actual copying. (The "return" is in scarequotes
/// because the `_mlir_ciface_` calling convention migrates any returned
/// memrefs into an out-parameter passed before all the other function
/// parameters.)
template <typename DataSizeT, typename T>
static inline void aliasIntoMemref(DataSizeT size, T *data,
                                   StridedMemRefType<T, 1> &ref) {
  …
}

} // anonymous namespace

extern … // extern "C"

#undef MEMREF_GET_PAYLOAD
#undef ASSERT_USIZE_EQ
#undef MEMREF_GET_USIZE
#undef ASSERT_NO_STRIDE

#endif // MLIR_CRUNNERUTILS_DEFINE_FUNCTIONS