// chromium/third_party/skia/src/base/SkVx.h

/*
 * Copyright 2019 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SKVX_DEFINED
#define SKVX_DEFINED

// skvx::Vec<N,T> are SIMD vectors of N T's, a v1.5 successor to SkNx<N,T>.
//
// This time we're leaning a bit less on platform-specific intrinsics and a bit
// more on Clang/GCC vector extensions, but still keeping the option open to
// drop in platform-specific intrinsics, actually more easily than before.
//
// We've also fixed a few of the caveats that used to make SkNx awkward to work
// with across translation units.  skvx::Vec<N,T> always has N*sizeof(T) size
// and alignment and is safe to use across translation units freely.
// (Ideally we'd only align to T, but that tanks ARMv7 NEON codegen.)

#include "include/private/base/SkFeatures.h"
#include "src/base/SkUtils.h"
#include <algorithm>         // std::min, std::max
#include <cassert>           // assert()
#include <cmath>             // ceilf, floorf, truncf, roundf, sqrtf, etc.
#include <cstdint>           // intXX_t
#include <cstring>           // memcpy()
#include <initializer_list>  // std::initializer_list
#include <type_traits>
#include <utility>           // std::index_sequence

// Users may disable SIMD with SKNX_NO_SIMD, which may be set via compiler flags.
// The gn build has no option which sets SKNX_NO_SIMD.
// Use SKVX_USE_SIMD internally to avoid confusing double negation.
// Do not use 'defined' in a macro expansion.
// SKVX_USE_SIMD must expand to a value (not just be defined) because it is
// tested below with `#if SKVX_USE_SIMD && ...`; an empty expansion there is a
// preprocessor error, and defining it in *both* branches made SKNX_NO_SIMD a no-op.
#if !defined(SKNX_NO_SIMD)
    #define SKVX_USE_SIMD 1
#else
    #define SKVX_USE_SIMD 0
#endif

#if SKVX_USE_SIMD
    #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE1
        #include <immintrin.h>
    #elif defined(SK_ARM_HAS_NEON)
        #include <arm_neon.h>
    #elif defined(__wasm_simd128__)
        #include <wasm_simd128.h>
    #elif SK_CPU_LSX_LEVEL >= SK_CPU_LSX_LEVEL_LASX
        #include <lasxintrin.h>
        #include <lsxintrin.h>
    #elif SK_CPU_LSX_LEVEL >= SK_CPU_LSX_LEVEL_LSX
        #include <lsxintrin.h>
    #endif
#endif

// To avoid ODR violations, all methods must be force-inlined...
// Both branches previously expanded to nothing, so methods were not actually
// force-inlined — defeating the ODR-safety strategy described above.
#if defined(_MSC_VER)
    #define SKVX_ALWAYS_INLINE __forceinline
#else
    #define SKVX_ALWAYS_INLINE __attribute__((always_inline))
#endif

// ... and all standalone functions must be static.  Please use these helpers:
// These were defined empty, which strips the template heads and internal
// linkage from every function below (e.g. `SIT Vec<1,T> ...` would not parse).
//   SI    -> static inline function
//   SIT   -> template over T
//   SIN   -> template over N
//   SINT  -> template over N and T
//   SINTU -> template over N, T, and a scalar U convertible to T (for splats)
#define SI    static inline
#define SIT   template <       typename T> SI
#define SIN   template <int N             > SI
#define SINT  template <int N, typename T> SI
#define SINTU template <int N, typename T, typename U, \
                        typename=std::enable_if_t<std::is_convertible<U,T>::value>> SI

namespace skvx {

// Primary template, forward-declared so shuffle() can be declared before the body.
// Vec is aligned to its full size (N*sizeof(T)); see the note at the top of the file.
template <int N, typename T>
struct alignas(N*sizeof(T)) Vec;

// Forward declaration so Vec's swizzle helpers may reference shuffle().
template <int... Ix, int N, typename T>
SI Vec<sizeof...(Ix),T> shuffle(const Vec<N,T>&);

// All Vec have the same simple memory layout, the same as `T vec[N]`.
// NOTE(review): the body of the generic Vec appears to have been stripped —
// upstream SkVx.h defines lo/hi halves, constructors, load/store helpers, and
// operator[] here (the recursion below reads .lo/.hi). Restore from upstream
// rather than re-deriving; confirm against the original file.
template <int N, typename T>
struct alignas(N*sizeof(T)) Vec {};

// We have specializations for N == 1 (the base-case), as well as 2 and 4, where we add helpful
// constructors and swizzle accessors.
// NOTE(review): these specializations are truncated to bare `Vec<...,T>;` tokens —
// the template heads and struct bodies (val/lo/hi members, x()/y()/z()/w()
// swizzles, etc.) are missing, which is not valid C++. This looks like an
// extraction artifact; restore the full specializations from upstream SkVx.h.
Vec<4, T>;

Vec<2, T>;

Vec<1, T>;

// Translate from a value type T to its corresponding Mask, the result of a comparison.
// A mask lane is an integer the same width as T with all bits set (true) or clear
// (false): float compares produce int32_t lanes, double compares produce int64_t
// lanes, and integer types mask as themselves.
// (The specializations had empty bodies and `M;` was truncated — restored here.)
template <typename T> struct Mask { using type = T; };
template <> struct Mask<float > { using type = int32_t; };
template <> struct Mask<double> { using type = int64_t; };
template <typename T> using M = typename Mask<T>::type;

// Join two Vec<N,T> into one Vec<2N,T>: `lo` fills the low lanes, `hi` the high lanes.
// (The body was an empty `{}`: flowing off the end of a non-void function is UB,
// and the recursive operators below all depend on join().)
SINT Vec<2*N,T> join(const Vec<N,T>& lo, const Vec<N,T>& hi) {
    Vec<2*N,T> v;
    v.lo = lo;  // assumes Vec<2N,T> exposes lo/hi halves, as the recursion below does
    v.hi = hi;
    return v;
}

// We have three strategies for implementing Vec operations:
//    1) lean on Clang/GCC vector extensions when available;
//    2) use map() to apply a scalar function lane-wise;
//    3) recurse on lo/hi to scalar portable implementations.
// We can slot in platform-specific implementations as overloads for particular Vec<N,T>,
// or often integrate them directly into the recursion of style 3), allowing fine control.

#if SKVX_USE_SIMD && (defined(__clang__) || defined(__GNUC__))

    // VExt<N,T> types have the same size as Vec<N,T> and support most operations directly.
    #if defined(__clang__)
        // Clang supports the ext_vector_type attribute directly on an alias template.
        // (The declaration was truncated to an unparsable fragment — restored.)
        template <int N, typename T>
        using VExt = T __attribute__((ext_vector_type(N)));

    #elif defined(__GNUC__)
        // GCC has no ext_vector_type; wrap the vector_size attribute in a helper
        // struct, since it cannot be applied directly inside an alias template.
        template <int N, typename T>
        struct VExtHelper {
            typedef T __attribute__((vector_size(N*sizeof(T)))) type;
        };

        template <int N, typename T>
        using VExt = typename VExtHelper<N,T>::type;

        // For some reason some (new!) versions of GCC cannot seem to deduce N in the generic
        // to_vec<N,T>() below for N=4 and T=float.  This workaround seems to help...
        SI Vec<4,float> to_vec(VExt<4,float> v) { return sk_bit_cast<Vec<4,float>>(v); }
    #endif

    // Bit-reinterpret between Vec and VExt; both have identical size and layout.
    // (Bodies were empty `{}` — UB for non-void returns; restored to bit-casts,
    // matching the GCC N=4/float workaround above.)
    SINT VExt<N,T> to_vext(const Vec<N,T>& v) { return sk_bit_cast<VExt<N,T>>(v); }
    SINT Vec <N,T> to_vec(const VExt<N,T>& v) { return sk_bit_cast<Vec <N,T>>(v); }

    // All operators lower onto the compiler's native vector extensions through
    // to_vext()/to_vec(). (Bodies were empty `{}` — UB for non-void returns —
    // restored to the standard forwarding forms.)
    SINT Vec<N,T> operator+(const Vec<N,T>& x, const Vec<N,T>& y) { return to_vec<N,T>(to_vext(x) + to_vext(y)); }
    SINT Vec<N,T> operator-(const Vec<N,T>& x, const Vec<N,T>& y) { return to_vec<N,T>(to_vext(x) - to_vext(y)); }
    SINT Vec<N,T> operator*(const Vec<N,T>& x, const Vec<N,T>& y) { return to_vec<N,T>(to_vext(x) * to_vext(y)); }
    SINT Vec<N,T> operator/(const Vec<N,T>& x, const Vec<N,T>& y) { return to_vec<N,T>(to_vext(x) / to_vext(y)); }

    SINT Vec<N,T> operator^(const Vec<N,T>& x, const Vec<N,T>& y) { return to_vec<N,T>(to_vext(x) ^ to_vext(y)); }
    SINT Vec<N,T> operator&(const Vec<N,T>& x, const Vec<N,T>& y) { return to_vec<N,T>(to_vext(x) & to_vext(y)); }
    SINT Vec<N,T> operator|(const Vec<N,T>& x, const Vec<N,T>& y) { return to_vec<N,T>(to_vext(x) | to_vext(y)); }

    SINT Vec<N,T> operator!(const Vec<N,T>& x) { return to_vec<N,T>(!to_vext(x)); }
    SINT Vec<N,T> operator-(const Vec<N,T>& x) { return to_vec<N,T>(-to_vext(x)); }
    SINT Vec<N,T> operator~(const Vec<N,T>& x) { return to_vec<N,T>(~to_vext(x)); }

    // Shifts take a scalar count applied uniformly to every lane.
    SINT Vec<N,T> operator<<(const Vec<N,T>& x, int k) { return to_vec<N,T>(to_vext(x) << k); }
    SINT Vec<N,T> operator>>(const Vec<N,T>& x, int k) { return to_vec<N,T>(to_vext(x) >> k); }

    // Vector-extension comparisons already yield all-0/all-1 integer lanes of the
    // mask type, so we only need to bit-cast the result.
    SINT Vec<N,M<T>> operator==(const Vec<N,T>& x, const Vec<N,T>& y) { return sk_bit_cast<Vec<N,M<T>>>(to_vext(x) == to_vext(y)); }
    SINT Vec<N,M<T>> operator!=(const Vec<N,T>& x, const Vec<N,T>& y) { return sk_bit_cast<Vec<N,M<T>>>(to_vext(x) != to_vext(y)); }
    SINT Vec<N,M<T>> operator<=(const Vec<N,T>& x, const Vec<N,T>& y) { return sk_bit_cast<Vec<N,M<T>>>(to_vext(x) <= to_vext(y)); }
    SINT Vec<N,M<T>> operator>=(const Vec<N,T>& x, const Vec<N,T>& y) { return sk_bit_cast<Vec<N,M<T>>>(to_vext(x) >= to_vext(y)); }
    SINT Vec<N,M<T>> operator< (const Vec<N,T>& x, const Vec<N,T>& y) { return sk_bit_cast<Vec<N,M<T>>>(to_vext(x) <  to_vext(y)); }
    SINT Vec<N,M<T>> operator> (const Vec<N,T>& x, const Vec<N,T>& y) { return sk_bit_cast<Vec<N,M<T>>>(to_vext(x) >  to_vext(y)); }

#else

    // Either SKNX_NO_SIMD is defined, or Clang/GCC vector extensions are not available.
    // We'll implement things portably with N==1 scalar implementations and recursion onto them.

    // N == 1 scalar implementations.  Each forwards directly to the underlying
    // scalar operator on .val; implicit conversion back to Vec<1,T> wraps the result.
    SIT Vec<1,T> operator+(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val + y.val; }
    SIT Vec<1,T> operator-(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val - y.val; }
    SIT Vec<1,T> operator*(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val * y.val; }
    SIT Vec<1,T> operator/(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val / y.val; }

    SIT Vec<1,T> operator^(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val ^ y.val; }
    SIT Vec<1,T> operator&(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val & y.val; }
    SIT Vec<1,T> operator|(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val | y.val; }

    SIT Vec<1,T> operator!(const Vec<1,T>& x) { return !x.val; }
    SIT Vec<1,T> operator-(const Vec<1,T>& x) { return -x.val; }
    SIT Vec<1,T> operator~(const Vec<1,T>& x) { return ~x.val; }

    SIT Vec<1,T> operator<<(const Vec<1,T>& x, int k) { return x.val << k; }
    SIT Vec<1,T> operator>>(const Vec<1,T>& x, int k) { return x.val >> k; }

    // Comparisons return mask lanes of ~0 (all bits set) for true and 0 for false,
    // mirroring the all-ones/all-zeros convention of SIMD compare instructions.
    SIT Vec<1,M<T>> operator==(const Vec<1,T>& x, const Vec<1,T>& y) {
        return x.val == y.val ? ~0 : 0;
    }
    SIT Vec<1,M<T>> operator!=(const Vec<1,T>& x, const Vec<1,T>& y) {
        return x.val != y.val ? ~0 : 0;
    }
    SIT Vec<1,M<T>> operator<=(const Vec<1,T>& x, const Vec<1,T>& y) {
        return x.val <= y.val ? ~0 : 0;
    }
    SIT Vec<1,M<T>> operator>=(const Vec<1,T>& x, const Vec<1,T>& y) {
        return x.val >= y.val ? ~0 : 0;
    }
    SIT Vec<1,M<T>> operator< (const Vec<1,T>& x, const Vec<1,T>& y) {
        return x.val <  y.val ? ~0 : 0;
    }
    SIT Vec<1,M<T>> operator> (const Vec<1,T>& x, const Vec<1,T>& y) {
        return x.val >  y.val ? ~0 : 0;
    }

    // Recurse on lo/hi down to N==1 scalar implementations.
    // Each operator applies itself independently to the low and high halves and
    // stitches the results back together with join().
    SINT Vec<N,T> operator+(const Vec<N,T>& x, const Vec<N,T>& y) {
        return join(x.lo + y.lo, x.hi + y.hi);
    }
    SINT Vec<N,T> operator-(const Vec<N,T>& x, const Vec<N,T>& y) {
        return join(x.lo - y.lo, x.hi - y.hi);
    }
    SINT Vec<N,T> operator*(const Vec<N,T>& x, const Vec<N,T>& y) {
        return join(x.lo * y.lo, x.hi * y.hi);
    }
    SINT Vec<N,T> operator/(const Vec<N,T>& x, const Vec<N,T>& y) {
        return join(x.lo / y.lo, x.hi / y.hi);
    }

    SINT Vec<N,T> operator^(const Vec<N,T>& x, const Vec<N,T>& y) {
        return join(x.lo ^ y.lo, x.hi ^ y.hi);
    }
    SINT Vec<N,T> operator&(const Vec<N,T>& x, const Vec<N,T>& y) {
        return join(x.lo & y.lo, x.hi & y.hi);
    }
    SINT Vec<N,T> operator|(const Vec<N,T>& x, const Vec<N,T>& y) {
        return join(x.lo | y.lo, x.hi | y.hi);
    }

    SINT Vec<N,T> operator!(const Vec<N,T>& x) { return join(!x.lo, !x.hi); }
    SINT Vec<N,T> operator-(const Vec<N,T>& x) { return join(-x.lo, -x.hi); }
    SINT Vec<N,T> operator~(const Vec<N,T>& x) { return join(~x.lo, ~x.hi); }

    // The scalar shift count k is applied uniformly to every lane.
    SINT Vec<N,T> operator<<(const Vec<N,T>& x, int k) { return join(x.lo << k, x.hi << k); }
    SINT Vec<N,T> operator>>(const Vec<N,T>& x, int k) { return join(x.lo >> k, x.hi >> k); }

    SINT Vec<N,M<T>> operator==(const Vec<N,T>& x, const Vec<N,T>& y) {
        return join(x.lo == y.lo, x.hi == y.hi);
    }
    SINT Vec<N,M<T>> operator!=(const Vec<N,T>& x, const Vec<N,T>& y) {
        return join(x.lo != y.lo, x.hi != y.hi);
    }
    SINT Vec<N,M<T>> operator<=(const Vec<N,T>& x, const Vec<N,T>& y) {
        return join(x.lo <= y.lo, x.hi <= y.hi);
    }
    SINT Vec<N,M<T>> operator>=(const Vec<N,T>& x, const Vec<N,T>& y) {
        return join(x.lo >= y.lo, x.hi >= y.hi);
    }
    SINT Vec<N,M<T>> operator< (const Vec<N,T>& x, const Vec<N,T>& y) {
        return join(x.lo <  y.lo, x.hi <  y.hi);
    }
    SINT Vec<N,M<T>> operator> (const Vec<N,T>& x, const Vec<N,T>& y) {
        return join(x.lo >  y.lo, x.hi >  y.hi);
    }
#endif

// Scalar/vector operations splat the scalar to a vector.
// NOTE(review): every body in this section is an empty `{}` despite a non-void
// return type (UB if ever called) — almost certainly stripped by extraction.
// Upstream these splat the scalar into a Vec<N,T> and forward to the
// vector/vector operators above; restore from upstream SkVx.h.
SINTU Vec<N,T>    operator+ (U x, const Vec<N,T>& y) {}
SINTU Vec<N,T>    operator- (U x, const Vec<N,T>& y) {}
SINTU Vec<N,T>    operator* (U x, const Vec<N,T>& y) {}
SINTU Vec<N,T>    operator/ (U x, const Vec<N,T>& y) {}
SINTU Vec<N,T>    operator^ (U x, const Vec<N,T>& y) {}
SINTU Vec<N,T>    operator& (U x, const Vec<N,T>& y) {}
SINTU Vec<N,T>    operator| (U x, const Vec<N,T>& y) {}
SINTU Vec<N,M<T>> operator==(U x, const Vec<N,T>& y) {}
SINTU Vec<N,M<T>> operator!=(U x, const Vec<N,T>& y) {}
SINTU Vec<N,M<T>> operator<=(U x, const Vec<N,T>& y) {}
SINTU Vec<N,M<T>> operator>=(U x, const Vec<N,T>& y) {}
SINTU Vec<N,M<T>> operator< (U x, const Vec<N,T>& y) {}
SINTU Vec<N,M<T>> operator> (U x, const Vec<N,T>& y) {}

SINTU Vec<N,T>    operator+ (const Vec<N,T>& x, U y) {}
SINTU Vec<N,T>    operator- (const Vec<N,T>& x, U y) {}
SINTU Vec<N,T>    operator* (const Vec<N,T>& x, U y) {}
SINTU Vec<N,T>    operator/ (const Vec<N,T>& x, U y) {}
SINTU Vec<N,T>    operator^ (const Vec<N,T>& x, U y) {}
SINTU Vec<N,T>    operator& (const Vec<N,T>& x, U y) {}
SINTU Vec<N,T>    operator| (const Vec<N,T>& x, U y) {}
SINTU Vec<N,M<T>> operator==(const Vec<N,T>& x, U y) {}
SINTU Vec<N,M<T>> operator!=(const Vec<N,T>& x, U y) {}
SINTU Vec<N,M<T>> operator<=(const Vec<N,T>& x, U y) {}
SINTU Vec<N,M<T>> operator>=(const Vec<N,T>& x, U y) {}
SINTU Vec<N,M<T>> operator< (const Vec<N,T>& x, U y) {}
SINTU Vec<N,M<T>> operator> (const Vec<N,T>& x, U y) {}

// Compound assignment; upstream these apply the binary operator and return x.
SINT Vec<N,T>& operator+=(Vec<N,T>& x, const Vec<N,T>& y) {}
SINT Vec<N,T>& operator-=(Vec<N,T>& x, const Vec<N,T>& y) {}
SINT Vec<N,T>& operator*=(Vec<N,T>& x, const Vec<N,T>& y) {}
SINT Vec<N,T>& operator/=(Vec<N,T>& x, const Vec<N,T>& y) {}
SINT Vec<N,T>& operator^=(Vec<N,T>& x, const Vec<N,T>& y) {}
SINT Vec<N,T>& operator&=(Vec<N,T>& x, const Vec<N,T>& y) {}
SINT Vec<N,T>& operator|=(Vec<N,T>& x, const Vec<N,T>& y) {}

SINTU Vec<N,T>& operator+=(Vec<N,T>& x, U y) {}
SINTU Vec<N,T>& operator-=(Vec<N,T>& x, U y) {}
SINTU Vec<N,T>& operator*=(Vec<N,T>& x, U y) {}
SINTU Vec<N,T>& operator/=(Vec<N,T>& x, U y) {}
SINTU Vec<N,T>& operator^=(Vec<N,T>& x, U y) {}
SINTU Vec<N,T>& operator&=(Vec<N,T>& x, U y) {}
SINTU Vec<N,T>& operator|=(Vec<N,T>& x, U y) {}

SINT Vec<N,T>& operator<<=(Vec<N,T>& x, int bits) {}
SINT Vec<N,T>& operator>>=(Vec<N,T>& x, int bits) {}

// Some operations we want are not expressible with Clang/GCC vector extensions.
// NOTE(review): all function bodies from here to the end of this section are
// empty `{}` despite non-void return types — clearly stripped by extraction.
// Restore the implementations from upstream SkVx.h; the comments below describe
// the intended contracts.

// Clang can reason about naive_if_then_else() and optimize through it better
// than if_then_else(), so it's sometimes useful to call it directly when we
// think an entire expression should optimize away, e.g. min()/max().
SINT Vec<N,T> naive_if_then_else(const Vec<N,M<T>>& cond, const Vec<N,T>& t, const Vec<N,T>& e) {}

// Lane-wise select: where cond's mask lane is all-1 pick t, else e.
SIT Vec<1,T> if_then_else(const Vec<1,M<T>>& cond, const Vec<1,T>& t, const Vec<1,T>& e) {}
SINT Vec<N,T> if_then_else(const Vec<N,M<T>>& cond, const Vec<N,T>& t, const Vec<N,T>& e) {}

// any(): true if any lane is non-zero; all(): true if every lane is non-zero.
SIT  bool any(const Vec<1,T>& x) {}
SINT bool any(const Vec<N,T>& x) {}

SIT  bool all(const Vec<1,T>& x) {}
SINT bool all(const Vec<N,T>& x) {}

// cast() Vec<N,S> to Vec<N,D>, as if applying a C-cast to each lane.
// TODO: implement with map()?
template <typename D, typename S>
SI Vec<1,D> cast(const Vec<1,S>& src) {}

template <typename D, int N, typename S>
SI Vec<N,D> cast(const Vec<N,S>& src) {}

// min/max match logic of std::min/std::max, which is important when NaN is involved.
// The unary forms reduce across lanes; the binary forms are lane-wise.
SIT  T min(const Vec<1,T>& x) {}
SIT  T max(const Vec<1,T>& x) {}
SINT T min(const Vec<N,T>& x) {}
SINT T max(const Vec<N,T>& x) {}

SINT Vec<N,T> min(const Vec<N,T>& x, const Vec<N,T>& y) {}
SINT Vec<N,T> max(const Vec<N,T>& x, const Vec<N,T>& y) {}

SINTU Vec<N,T> min(const Vec<N,T>& x, U y) {}
SINTU Vec<N,T> max(const Vec<N,T>& x, U y) {}
SINTU Vec<N,T> min(U x, const Vec<N,T>& y) {}
SINTU Vec<N,T> max(U x, const Vec<N,T>& y) {}

// pin matches the logic of SkTPin, which is important when NaN is involved. It always returns
// values in the range lo..hi, and if x is NaN, it returns lo.
SINT Vec<N,T> pin(const Vec<N,T>& x, const Vec<N,T>& lo, const Vec<N,T>& hi) {}

// Shuffle values from a vector pretty arbitrarily:
//    skvx::Vec<4,float> rgba = {R,G,B,A};
//    shuffle<2,1,0,3>        (rgba) ~> {B,G,R,A}
//    shuffle<2,1>            (rgba) ~> {B,G}
//    shuffle<2,1,2,1,2,1,2,1>(rgba) ~> {B,G,B,G,B,G,B,G}
//    shuffle<3,3,3,3>        (rgba) ~> {A,A,A,A}
// The only real restriction is that the output also be a legal N=power-of-two sknx::Vec.
template <int... Ix, int N, typename T>
SI Vec<sizeof...(Ix),T> shuffle(const Vec<N,T>& x) {}

// Call map(fn, x) for a vector with fn() applied to each lane of x, { fn(x[0]), fn(x[1]), ... },
// or map(fn, x,y) for a vector of fn(x[i], y[i]), etc.

template <typename Fn, typename... Args, size_t... I>
SI auto map(std::index_sequence<I...>,
            Fn&& fn, const Args&... args) -> skvx::Vec<sizeof...(I), decltype(fn(args[0]...))> {}

template <typename Fn, int N, typename T, typename... Rest>
auto map(Fn&& fn, const Vec<N,T>& first, const Rest&... rest) {}

// Lane-wise <cmath> wrappers; upstream these are map() over the scalar functions.
SIN Vec<N,float>  ceil(const Vec<N,float>& x) {}
SIN Vec<N,float> floor(const Vec<N,float>& x) {}
SIN Vec<N,float> trunc(const Vec<N,float>& x) {}
SIN Vec<N,float> round(const Vec<N,float>& x) {}
SIN Vec<N,float>  sqrt(const Vec<N,float>& x) {}
SIN Vec<N,float>   abs(const Vec<N,float>& x) {}
SIN Vec<N,float>   fma(const Vec<N,float>& x,
                       const Vec<N,float>& y,
                       const Vec<N,float>& z) {}

SI Vec<1,int> lrint(const Vec<1,float>& x) {}
SIN Vec<N,int> lrint(const Vec<N,float>& x) {}

SIN Vec<N,float> fract(const Vec<N,float>& x) {}

// Converts float to half, rounding to nearest even, and supporting de-normal f16 conversion,
// and overflow to f16 infinity. Should not be called with NaNs, since it can convert NaN->inf.
// KEEP IN SYNC with skcms' Half_from_F to ensure that f16 colors are computed consistently in both
// skcms and skvx.
// NOTE(review): bodies in this section are empty `{}` despite non-void return
// types — stripped by extraction; restore from upstream SkVx.h.
SIN Vec<N,uint16_t> to_half(const Vec<N,float>& x) {}

// Converts from half to float, preserving NaN and +/- infinity.
// KEEP IN SYNC with skcms' F_from_Half to ensure that f16 colors are computed consistently in both
// skcms and skvx.
SIN Vec<N,float> from_half(const Vec<N,uint16_t>& x) {}

// div255(x) = (x + 127) / 255 is a bit-exact rounding divide-by-255, packing down to 8-bit.
SIN Vec<N,uint8_t> div255(const Vec<N,uint16_t>& x) {}

// approx_scale(x,y) approximates div255(cast<uint16_t>(x)*cast<uint16_t>(y)) within a bit,
// and is always perfect when x or y is 0 or 255.
SIN Vec<N,uint8_t> approx_scale(const Vec<N,uint8_t>& x, const Vec<N,uint8_t>& y) {}

// saturated_add(x,y) sums values and clamps to the maximum value instead of overflowing.
SINT std::enable_if_t<std::is_unsigned_v<T>, Vec<N,T>> saturated_add(const Vec<N,T>& x,
                                                                     const Vec<N,T>& y) {}

// The ScaledDividerU32 takes a divisor > 1, and creates a function divide(numerator) that
// calculates a numerator / denominator. For this to be rounded properly, numerator should have
// half added in:
// divide(numerator + half) == floor(numerator/denominator + 1/2).
//
// This gives an answer within +/- 1 from the true value.
//
// Derivation of half:
//    numerator/denominator + 1/2 = (numerator + half) / d
//    numerator + denominator / 2 = numerator + half
//    half = denominator / 2.
//
// Because half is divided by 2, that division must also be rounded.
//    half == denominator / 2 = (denominator + 1) / 2.
//
// The divisorFactor is just a scaled value:
//    divisorFactor = (1 / divisor) * 2 ^ 32.
// The maximum that can be divided and rounded is UINT_MAX - half.
// NOTE(review): the class body (divide(), half(), divisorFactor members) is missing.
class ScaledDividerU32 {};


// Widening multiplies: the product of two 8-bit (resp. 16-bit) inputs needs 16
// (resp. 32) bits; mulhi keeps only the high 16 bits of a 16x16 product.
SIN Vec<N,uint16_t> mull(const Vec<N,uint8_t>& x,
                         const Vec<N,uint8_t>& y) {}

SIN Vec<N,uint32_t> mull(const Vec<N,uint16_t>& x,
                         const Vec<N,uint16_t>& y) {}

SIN Vec<N,uint16_t> mulhi(const Vec<N,uint16_t>& x,
                          const Vec<N,uint16_t>& y) {}

// Geometric helpers: dot product, 2-D cross product (scalar), Euclidean length,
// and normalization to unit length; isfinite() checks every lane.
SINT T dot(const Vec<N, T>& a, const Vec<N, T>& b) {}

SIT T cross(const Vec<2, T>& a, const Vec<2, T>& b) {}

SIN float length(const Vec<N, float>& v) {}

SIN double length(const Vec<N, double>& v) {}

SIN Vec<N, float> normalize(const Vec<N, float>& v) {}

SIN Vec<N, double> normalize(const Vec<N, double>& v) {}

SINT bool isfinite(const Vec<N, T>& v) {}

// De-interleaving load of 4 vectors.
//
// WARNING: These are really only supported well on NEON. Consider restructuring your data before
// resorting to these methods.
// NOTE(review): bodies are empty `{}` — upstream the N==1 case reads v[0..3]
// and the general case recurses on lo/hi with stride 4. Restore from upstream.
SIT void strided_load4(const T* v,
                       Vec<1,T>& a,
                       Vec<1,T>& b,
                       Vec<1,T>& c,
                       Vec<1,T>& d) {}
SINT void strided_load4(const T* v,
                        Vec<N,T>& a,
                        Vec<N,T>& b,
                        Vec<N,T>& c,
                        Vec<N,T>& d) {}
#if SKVX_USE_SIMD && defined(SK_ARM_HAS_NEON)
// NEON's vld4 loads four interleaved vectors in one instruction, returning an
// array of 4 registers. One overload per supported (N, T, intrinsic) triple.
// (The macro was defined empty but invoked with three arguments below, which is
// a preprocessor error — restored.)
#define IMPL_LOAD4_TRANSPOSED(N, T, VLD)       \
SI void strided_load4(const T* v,              \
                      Vec<N,T>& a,             \
                      Vec<N,T>& b,             \
                      Vec<N,T>& c,             \
                      Vec<N,T>& d) {           \
    auto mat = VLD(v);                         \
    a = sk_bit_cast<Vec<N,T>>(mat.val[0]);     \
    b = sk_bit_cast<Vec<N,T>>(mat.val[1]);     \
    c = sk_bit_cast<Vec<N,T>>(mat.val[2]);     \
    d = sk_bit_cast<Vec<N,T>>(mat.val[3]);     \
}
IMPL_LOAD4_TRANSPOSED(2, uint32_t, vld4_u32)
IMPL_LOAD4_TRANSPOSED(4, uint16_t, vld4_u16)
IMPL_LOAD4_TRANSPOSED(8, uint8_t, vld4_u8)
IMPL_LOAD4_TRANSPOSED(2, int32_t, vld4_s32)
IMPL_LOAD4_TRANSPOSED(4, int16_t, vld4_s16)
IMPL_LOAD4_TRANSPOSED(8, int8_t, vld4_s8)
IMPL_LOAD4_TRANSPOSED(2, float, vld4_f32)
IMPL_LOAD4_TRANSPOSED(4, uint32_t, vld4q_u32)
IMPL_LOAD4_TRANSPOSED(8, uint16_t, vld4q_u16)
IMPL_LOAD4_TRANSPOSED(16, uint8_t, vld4q_u8)
IMPL_LOAD4_TRANSPOSED(4, int32_t, vld4q_s32)
IMPL_LOAD4_TRANSPOSED(8, int16_t, vld4q_s16)
IMPL_LOAD4_TRANSPOSED(16, int8_t, vld4q_s8)
IMPL_LOAD4_TRANSPOSED(4, float, vld4q_f32)
#undef IMPL_LOAD4_TRANSPOSED

#elif SKVX_USE_SIMD && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE1

// Load 16 consecutive floats and de-interleave with SSE's in-register 4x4 transpose.
// (The body was empty, leaving all four outputs unwritten — restored.)
SI void strided_load4(const float* v,
                      Vec<4,float>& a,
                      Vec<4,float>& b,
                      Vec<4,float>& c,
                      Vec<4,float>& d) {
    __m128 a_ = _mm_loadu_ps(v);
    __m128 b_ = _mm_loadu_ps(v+4);
    __m128 c_ = _mm_loadu_ps(v+8);
    __m128 d_ = _mm_loadu_ps(v+12);
    _MM_TRANSPOSE4_PS(a_, b_, c_, d_);
    a = sk_bit_cast<Vec<4,float>>(a_);
    b = sk_bit_cast<Vec<4,float>>(b_);
    c = sk_bit_cast<Vec<4,float>>(c_);
    d = sk_bit_cast<Vec<4,float>>(d_);
}

#elif SKVX_USE_SIMD && SK_CPU_LSX_LEVEL >= SK_CPU_LSX_LEVEL_LSX
// Fixed: the condition referenced the misspelled macro SKVX_CPU_LSX_LEVEL, so
// this branch could never be selected.
//
// 4x4 transpose of 32-bit lanes via LSX interleave-low/high intrinsics.
// (The macro was defined empty but invoked below with four arguments — restored.)
#define _LSX_TRANSPOSE4(row0, row1, row2, row3)   \
do {                                              \
    __m128i __t0 = __lsx_vilvl_w(row1, row0);     \
    __m128i __t1 = __lsx_vilvl_w(row3, row2);     \
    __m128i __t2 = __lsx_vilvh_w(row1, row0);     \
    __m128i __t3 = __lsx_vilvh_w(row3, row2);     \
    (row0) = __lsx_vilvl_d(__t1, __t0);           \
    (row1) = __lsx_vilvh_d(__t1, __t0);           \
    (row2) = __lsx_vilvl_d(__t3, __t2);           \
    (row3) = __lsx_vilvh_d(__t3, __t2);           \
} while (0)

// Load 16 consecutive ints and de-interleave with the transpose above.
SI void strided_load4(const int* v,
                      Vec<4,int>& a,
                      Vec<4,int>& b,
                      Vec<4,int>& c,
                      Vec<4,int>& d) {
    __m128i a_ = __lsx_vld(v, 0);
    __m128i b_ = __lsx_vld(v, 16);
    __m128i c_ = __lsx_vld(v, 32);
    __m128i d_ = __lsx_vld(v, 48);
    _LSX_TRANSPOSE4(a_, b_, c_, d_);
    a = sk_bit_cast<Vec<4,int>>(a_);
    b = sk_bit_cast<Vec<4,int>>(b_);
    c = sk_bit_cast<Vec<4,int>>(c_);
    d = sk_bit_cast<Vec<4,int>>(d_);
}
#endif

// De-interleaving load of 2 vectors.
//
// WARNING: These are really only supported well on NEON. Consider restructuring your data before
// resorting to these methods.
// NOTE(review): bodies are empty `{}` — upstream the N==1 case reads v[0..1]
// and the general case recurses on lo/hi with stride 2. Restore from upstream.
SIT void strided_load2(const T* v, Vec<1,T>& a, Vec<1,T>& b) {}
SINT void strided_load2(const T* v, Vec<N,T>& a, Vec<N,T>& b) {}
#if SKVX_USE_SIMD && defined(SK_ARM_HAS_NEON)
// NEON's vld2 loads two interleaved vectors in one instruction.
// (The macro was defined empty but invoked with three arguments below — restored.)
#define IMPL_LOAD2_TRANSPOSED(N, T, VLD)                       \
SI void strided_load2(const T* v, Vec<N,T>& a, Vec<N,T>& b) {  \
    auto mat = VLD(v);                                         \
    a = sk_bit_cast<Vec<N,T>>(mat.val[0]);                     \
    b = sk_bit_cast<Vec<N,T>>(mat.val[1]);                     \
}
IMPL_LOAD2_TRANSPOSED(2, uint32_t, vld2_u32)
IMPL_LOAD2_TRANSPOSED(4, uint16_t, vld2_u16)
IMPL_LOAD2_TRANSPOSED(8, uint8_t, vld2_u8)
IMPL_LOAD2_TRANSPOSED(2, int32_t, vld2_s32)
IMPL_LOAD2_TRANSPOSED(4, int16_t, vld2_s16)
IMPL_LOAD2_TRANSPOSED(8, int8_t, vld2_s8)
IMPL_LOAD2_TRANSPOSED(2, float, vld2_f32)
IMPL_LOAD2_TRANSPOSED(4, uint32_t, vld2q_u32)
IMPL_LOAD2_TRANSPOSED(8, uint16_t, vld2q_u16)
IMPL_LOAD2_TRANSPOSED(16, uint8_t, vld2q_u8)
IMPL_LOAD2_TRANSPOSED(4, int32_t, vld2q_s32)
IMPL_LOAD2_TRANSPOSED(8, int16_t, vld2q_s16)
IMPL_LOAD2_TRANSPOSED(16, int8_t, vld2q_s8)
IMPL_LOAD2_TRANSPOSED(4, float, vld2q_f32)
#undef IMPL_LOAD2_TRANSPOSED
#endif

// Define commonly used aliases
float2;
float4;
float8;

double2;
double4;
double8;

byte2;
byte4;
byte8;
byte16;

int2;
int4;
int8;

ushort2;
ushort4;
ushort8;

uint2;
uint4;
uint8;

long2;
long4;
long8;

// Use with from_half and to_half to convert between floatX, and use these for storage.
half2;
half4;
half8;

}  // namespace skvx

#undef SINTU
#undef SINT
#undef SIN
#undef SIT
#undef SI
#undef SKVX_ALWAYS_INLINE
#undef SKVX_USE_SIMD

#endif//SKVX_DEFINED