chromium/third_party/eigen3/src/Eigen/src/Core/VectorwiseOp.h

// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2019 Gael Guennebaud <[email protected]>
// Copyright (C) 2006-2008 Benoit Jacob <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_PARTIAL_REDUX_H
#define EIGEN_PARTIAL_REDUX_H

// IWYU pragma: private
#include "./InternalHeaderCheck.h"

namespace Eigen {

/** \class PartialReduxExpr
 * \ingroup Core_Module
 *
 * \brief Generic expression of a partially reduxed matrix
 *
 * \tparam MatrixType the type of the matrix to which we are applying the redux operation
 * \tparam MemberOp type of the member functor
 * \tparam Direction indicates the direction of the redux (#Vertical or #Horizontal)
 *
 * This class represents an expression of a partial redux operator of a matrix.
 * It is the return type of some VectorwiseOp functions,
 * and most of the time this is the only way it is used.
 *
 * \sa class VectorwiseOp
 */

template <typename MatrixType, typename MemberOp, int Direction>
class PartialReduxExpr;

namespace internal {
template <typename MatrixType, typename MemberOp, int Direction>
struct traits<PartialReduxExpr<MatrixType, MemberOp, Direction>>;
}  // namespace internal

template <typename MatrixType, typename MemberOp, int Direction>
class PartialReduxExpr : public internal::dense_xpr_base<PartialReduxExpr<MatrixType, MemberOp, Direction> >::type,
                         internal::no_assignment_operator {};

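// Dummy binary functor, presumably used as the BINARYOP placeholder for member reductions that
// have no dedicated vectorizable binary operation.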
template <typename A, typename B>
struct partial_redux_dummy_func;

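// Helper macros generating the internal::member_xxx functors that implement the per-column/per-row
// reductions exposed by VectorwiseOp below.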
#define EIGEN_MAKE_PARTIAL_REDUX_FUNCTOR(MEMBER, COST, VECTORIZABLE, BINARYOP)

#define EIGEN_MEMBER_FUNCTOR(MEMBER, COST)

namespace internal {

EIGEN_MEMBER_FUNCTOR();
EIGEN_MEMBER_FUNCTOR();
EIGEN_MEMBER_FUNCTOR();
EIGEN_MEMBER_FUNCTOR();
EIGEN_MEMBER_FUNCTOR();
EIGEN_MEMBER_FUNCTOR();
EIGEN_MEMBER_FUNCTOR();

EIGEN_MAKE_PARTIAL_REDUX_FUNCTOR(sum, (Size - 1) * NumTraits<Scalar>::AddCost, 1, internal::scalar_sum_op);
EIGEN_MAKE_PARTIAL_REDUX_FUNCTOR(minCoeff, (Size - 1) * NumTraits<Scalar>::AddCost, 1, internal::scalar_min_op);
EIGEN_MAKE_PARTIAL_REDUX_FUNCTOR(maxCoeff, (Size - 1) * NumTraits<Scalar>::AddCost, 1, internal::scalar_max_op);
EIGEN_MAKE_PARTIAL_REDUX_FUNCTOR(prod, (Size - 1) * NumTraits<Scalar>::MulCost, 1, internal::scalar_product_op);

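// Functor computing the Lp norm of a single column or row; backs VectorwiseOp::lpNorm<p>().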
template <int p, typename ResultType, typename Scalar>
struct member_lpnorm {};

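// Functor applying a user-supplied binary functor as a reduction over a single column or row;
// backs VectorwiseOp::redux().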
template <typename BinaryOpT, typename Scalar>
struct member_redux {};
}  // namespace internal

/** \class VectorwiseOp
 * \ingroup Core_Module
 *
 * \brief Pseudo expression providing broadcasting and partial reduction operations
 *
 * \tparam ExpressionType the type of the object on which to do partial reductions
 * \tparam Direction indicates whether to operate on columns (#Vertical) or rows (#Horizontal)
 *
 * This class represents a pseudo expression with broadcasting and partial reduction features.
 * It is the return type of DenseBase::colwise() and DenseBase::rowwise()
 * and most of the time this is the only way it is explicitly used.
 *
 * To understand the logic of rowwise/colwise expressions, let's consider a generic case `A.colwise().foo()`
 * where `foo` is any method of `VectorwiseOp`. This expression is equivalent to applying `foo()` to each
 * column of `A` and then re-assembling the outputs into a matrix expression:
 * \code [A.col(0).foo(), A.col(1).foo(), ..., A.col(A.cols()-1).foo()] \endcode
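 *
 * A minimal sketch with a hypothetical 2x3 matrix:
 * \code
 * MatrixXd A(2, 3);
 * A << 1, 2, 3,
 *      4, 5, 6;
 * // A.colwise().sum() is the 1x3 row vector [5, 7, 9],
 * // i.e. [A.col(0).sum(), A.col(1).sum(), A.col(2).sum()]
 * \endcode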
 *
 * Example: \include MatrixBase_colwise.cpp
 * Output: \verbinclude MatrixBase_colwise.out
 *
 * The begin() and end() methods are obviously exceptions to the previous rule as they
 * return STL-compatible begin/end iterators to the rows or columns of the nested expression.
 * Typical use cases include range-based for loops and calls to STL algorithms:
 *
 * Example: \include MatrixBase_colwise_iterator_cxx11.cpp
 * Output: \verbinclude MatrixBase_colwise_iterator_cxx11.out
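 *
 * A minimal sketch, assuming `A` is a writable dense matrix (hypothetical name):
 * \code
 * for (auto col : A.colwise())
 *   col.normalize();                    // normalize each column of A in place
 * for (auto row : A.rowwise())
 *   std::sort(row.begin(), row.end());  // sort the coefficients of each row
 * \endcode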
 *
 * For a partial reduction on an empty input, the following rules apply.
 * For the sake of clarity, let's consider a vertical reduction:
 *   - If the number of columns is zero, then a 1x0 row-major vector expression is returned.
 *   - Otherwise, if the number of rows is zero, then
 *       - a row vector of zeros is returned for sum-like reductions (sum, squaredNorm, norm, etc.)
 *       - a row vector of ones is returned for a product reduction (e.g., <code>MatrixXd(n,0).colwise().prod()</code>)
 *       - an assertion is triggered for all other reductions (minCoeff, maxCoeff, redux(bin_op))
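 *
 * A minimal sketch of these rules on dynamic-size matrices:
 * \code
 * MatrixXd(3, 0).colwise().sum();       // 1x0 expression
 * MatrixXd(0, 4).colwise().sum();       // 1x4 row vector of zeros
 * MatrixXd(0, 4).colwise().prod();      // 1x4 row vector of ones
 * MatrixXd(0, 4).colwise().minCoeff();  // triggers an assertion when evaluated
 * \endcode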
 *
 * \sa DenseBase::colwise(), DenseBase::rowwise(), class PartialReduxExpr
 */
template <typename ExpressionType, int Direction>
class VectorwiseOp {};

// const colwise moved to DenseBase.h due to CUDA compiler bug

/** \returns a writable VectorwiseOp wrapper of *this providing additional partial reduction operations
 *
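 * The returned wrapper can also appear on the left-hand side of broadcasting assignments.
 * A minimal sketch, assuming `m` is a writable matrix and `v` a column vector with `m.rows()`
 * entries (hypothetical names):
 * \code
 * m.colwise() += v;  // adds v to every column of m
 * \endcode
 *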
 * \sa rowwise(), class VectorwiseOp, \ref TutorialReductionsVisitorsBroadcasting
 */
template <typename Derived>
EIGEN_DEVICE_FUNC inline typename DenseBase<Derived>::ColwiseReturnType DenseBase<Derived>::colwise() {
  return ColwiseReturnType(derived());
}

// const rowwise moved to DenseBase.h due to CUDA compiler bug

/** \returns a writable VectorwiseOp wrapper of *this providing additional partial reduction operations
 *
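 * A minimal sketch of the broadcasting use, assuming `m` is a writable matrix and `r` a row
 * vector with `m.cols()` entries (hypothetical names):
 * \code
 * m.rowwise() += r;  // adds r to every row of m
 * \endcode
 *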
 * \sa colwise(), class VectorwiseOp, \ref TutorialReductionsVisitorsBroadcasting
 */
template <typename Derived>
EIGEN_DEVICE_FUNC inline typename DenseBase<Derived>::RowwiseReturnType DenseBase<Derived>::rowwise() {
  return RowwiseReturnType(derived());
}

}  // end namespace Eigen

#endif  // EIGEN_PARTIAL_REDUX_H