godot/thirdparty/etcpak/ProcessRGB.cpp

#include <array>
#include <string.h>
#include <limits>
#ifdef __ARM_NEON
#  include <arm_neon.h>
#endif

#include "Dither.hpp"
#include "ForceInline.hpp"
#include "Math.hpp"
#include "ProcessCommon.hpp"
#include "ProcessRGB.hpp"
#include "Tables.hpp"
#include "Vector.hpp"
#if defined __SSE4_1__ || defined __AVX2__ || defined _MSC_VER
#  ifdef _MSC_VER
#    include <intrin.h>
#    include <Windows.h>
#    define _bswap(x) _byteswap_ulong(x)
#    define _bswap64(x) _byteswap_uint64(x)
#  else
#    include <x86intrin.h>
#  endif
#endif

#ifndef _bswap
#  define _bswap(x) __builtin_bswap32(x)
#  define _bswap64(x) __builtin_bswap64(x)
#endif

static const uint32_t MaxError = 1065369600; // ((38+76+14) * 255)^2
// common T-/H-mode table
static uint8_t tableTH[8] = { 3, 6, 11, 16, 23, 32, 41, 64 };

// thresholds for the early compression-mode decision scheme
// default: 0.03, 0.09, and 0.38
float ecmd_threshold[3] = { 0.03f, 0.09f, 0.38f };

static const uint8_t ModeUndecided = 0;
static const uint8_t ModePlanar = 0x1;
static const uint8_t ModeTH = 0x2;

const unsigned int R = 2;
const unsigned int G = 1;
const unsigned int B = 0;

struct Luma
{};

#ifdef __AVX2__
struct Plane
{
    uint64_t plane;
    uint64_t error;
    __m256i sum4;
};
#endif

#if defined __AVX2__ || (defined __ARM_NEON && defined __aarch64__)
struct Channels
{
#ifdef __AVX2__
    __m128i r8, g8, b8;
#elif defined __ARM_NEON && defined __aarch64__
    uint8x16x2_t r, g, b;
#endif
};
#endif

namespace
{
static etcpak_force_inline uint8_t clamp( uint8_t min, int16_t val, uint8_t max )
{}

static etcpak_force_inline uint8_t clampMin( uint8_t min, int16_t val )
{}

static etcpak_force_inline uint8_t clampMax( int16_t val, uint8_t max )
{}

// slightly faster than std::sort
static void insertionSort( uint8_t* arr1, uint8_t* arr2 )
{}

//converts indices from  |a0|a1|e0|e1|i0|i1|m0|m1|b0|b1|f0|f1|j0|j1|n0|n1|c0|c1|g0|g1|k0|k1|o0|o1|d0|d1|h0|h1|l0|l1|p0|p1| previously used by T- and H-modes
//                     into  |p0|o0|n0|m0|l0|k0|j0|i0|h0|g0|f0|e0|d0|c0|b0|a0|p1|o1|n1|m1|l1|k1|j1|i1|h1|g1|f1|e1|d1|c1|b1|a1| which should be used for all modes.
// NO WARRANTY --- SEE STATEMENT IN TOP OF FILE (C) Ericsson AB 2005-2013. All Rights Reserved.
static etcpak_force_inline int indexConversion( int pixelIndices )
{}

// Swapping two RGB-colors
// NO WARRANTY --- SEE STATEMENT IN TOP OF FILE (C) Ericsson AB 2005-2013. All Rights Reserved.
static etcpak_force_inline void swapColors( uint8_t( colors )[2][3] )
{}


// calculates quantized colors for T or H modes
void compressColor( uint8_t( currColor )[2][3], uint8_t( quantColor )[2][3], bool t_mode )
{}

// The three decoding functions below come from ETCPACK v2.74 and are slightly changed.
static etcpak_force_inline void decompressColor( uint8_t( colorsRGB444 )[2][3], uint8_t( colors )[2][3] )
{}

// calculates the paint colors from the block colors
// using a distance d and one of the H- or T-patterns.
static void calculatePaintColors59T( uint8_t d, uint8_t( colors )[2][3], uint8_t( pColors )[4][3] )
{}

static void calculatePaintColors58H( uint8_t d, uint8_t( colors )[2][3], uint8_t( pColors )[4][3] )
{}

#if defined _MSC_VER && !defined __clang__
static etcpak_force_inline unsigned long _bit_scan_forward( unsigned long mask )
{
    unsigned long ret;
    _BitScanForward( &ret, mask );
    return ret;
}
#endif

typedef std::array<uint16_t, 4> v4i;

#ifdef __AVX2__
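// Sums the three colour channels (fourth/alpha byte masked off) of the 4x4 source block
// for each of the four possible half-blocks; each 64-bit lane of the result holds the
// 16-bit per-channel sums of one half-block.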
static etcpak_force_inline __m256i Sum4_AVX2( const uint8_t* data) noexcept
{
    __m128i d0 = _mm_loadu_si128(((__m128i*)data) + 0);
    __m128i d1 = _mm_loadu_si128(((__m128i*)data) + 1);
    __m128i d2 = _mm_loadu_si128(((__m128i*)data) + 2);
    __m128i d3 = _mm_loadu_si128(((__m128i*)data) + 3);

    __m128i dm0 = _mm_and_si128(d0, _mm_set1_epi32(0x00FFFFFF));
    __m128i dm1 = _mm_and_si128(d1, _mm_set1_epi32(0x00FFFFFF));
    __m128i dm2 = _mm_and_si128(d2, _mm_set1_epi32(0x00FFFFFF));
    __m128i dm3 = _mm_and_si128(d3, _mm_set1_epi32(0x00FFFFFF));

    __m256i t0 = _mm256_cvtepu8_epi16(dm0);
    __m256i t1 = _mm256_cvtepu8_epi16(dm1);
    __m256i t2 = _mm256_cvtepu8_epi16(dm2);
    __m256i t3 = _mm256_cvtepu8_epi16(dm3);

    __m256i sum0 = _mm256_add_epi16(t0, t1);
    __m256i sum1 = _mm256_add_epi16(t2, t3);

    __m256i s0 = _mm256_permute2x128_si256(sum0, sum1, (0) | (3 << 4)); // 0, 0, 3, 3
    __m256i s1 = _mm256_permute2x128_si256(sum0, sum1, (1) | (2 << 4)); // 1, 1, 2, 2

    __m256i s2 = _mm256_permute4x64_epi64(s0, _MM_SHUFFLE(1, 3, 0, 2));
    __m256i s3 = _mm256_permute4x64_epi64(s0, _MM_SHUFFLE(0, 2, 1, 3));
    __m256i s4 = _mm256_permute4x64_epi64(s1, _MM_SHUFFLE(3, 1, 0, 2));
    __m256i s5 = _mm256_permute4x64_epi64(s1, _MM_SHUFFLE(2, 0, 1, 3));

    __m256i sum5 = _mm256_add_epi16(s2, s3); //   3,   0,   3,   0
    __m256i sum6 = _mm256_add_epi16(s4, s5); //   2,   1,   1,   2
    return _mm256_add_epi16(sum5, sum6);     // 3+2, 0+1, 3+1, 0+2
}

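// Rounded division by 8: turns the 8-pixel half-block sums into per-channel averages.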
static etcpak_force_inline __m256i Average_AVX2( const __m256i data) noexcept
{
    __m256i a = _mm256_add_epi16(data, _mm256_set1_epi16(4));

    return _mm256_srli_epi16(a, 3);
}

static etcpak_force_inline __m128i CalcErrorBlock_AVX2( const __m256i data, const v4i a[8]) noexcept
{
    //
    __m256i a0 = _mm256_load_si256((__m256i*)a[0].data());
    __m256i a1 = _mm256_load_si256((__m256i*)a[4].data());

    // err = 8 * ( sq( average[0] ) + sq( average[1] ) + sq( average[2] ) );
    __m256i a4 = _mm256_madd_epi16(a0, a0);
    __m256i a5 = _mm256_madd_epi16(a1, a1);

    __m256i a6 = _mm256_hadd_epi32(a4, a5);
    __m256i a7 = _mm256_slli_epi32(a6, 3);

    __m256i a8 = _mm256_add_epi32(a7, _mm256_set1_epi32(0x3FFFFFFF)); // Big value to prevent negative values, but small enough to prevent overflow

    // average is not swapped
    // err -= block[0] * 2 * average[0];
    // err -= block[1] * 2 * average[1];
    // err -= block[2] * 2 * average[2];
    __m256i a2 = _mm256_slli_epi16(a0, 1);
    __m256i a3 = _mm256_slli_epi16(a1, 1);
    __m256i b0 = _mm256_madd_epi16(a2, data);
    __m256i b1 = _mm256_madd_epi16(a3, data);

    __m256i b2 = _mm256_hadd_epi32(b0, b1);
    __m256i b3 = _mm256_sub_epi32(a8, b2);
    __m256i b4 = _mm256_hadd_epi32(b3, b3);

    __m256i b5 = _mm256_permutevar8x32_epi32(b4, _mm256_set_epi32(0, 0, 0, 0, 5, 1, 4, 0));

    return _mm256_castsi256_si128(b5);
}

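// Quantizes the half-block averages: to 5 bits, clamping the difference between paired
// half-blocks to [-4,3] so it fits ETC1's signed 3-bit delta (stored expanded to 8 bits
// in a[4..7]), and to 4 bits for individual mode (stored expanded to 8 bits in a[0..3]).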
static etcpak_force_inline void ProcessAverages_AVX2(const __m256i d, v4i a[8] ) noexcept
{
    __m256i t = _mm256_add_epi16(_mm256_mullo_epi16(d, _mm256_set1_epi16(31)), _mm256_set1_epi16(128));

    __m256i c = _mm256_srli_epi16(_mm256_add_epi16(t, _mm256_srli_epi16(t, 8)), 8);

    __m256i c1 = _mm256_shuffle_epi32(c, _MM_SHUFFLE(3, 2, 3, 2));
    __m256i diff = _mm256_sub_epi16(c, c1);
    diff = _mm256_max_epi16(diff, _mm256_set1_epi16(-4));
    diff = _mm256_min_epi16(diff, _mm256_set1_epi16(3));

    __m256i co = _mm256_add_epi16(c1, diff);

    c = _mm256_blend_epi16(co, c, 0xF0);

    __m256i a0 = _mm256_or_si256(_mm256_slli_epi16(c, 3), _mm256_srli_epi16(c, 2));

    _mm256_store_si256((__m256i*)a[4].data(), a0);

    __m256i t0 = _mm256_add_epi16(_mm256_mullo_epi16(d, _mm256_set1_epi16(15)), _mm256_set1_epi16(128));
    __m256i t1 = _mm256_srli_epi16(_mm256_add_epi16(t0, _mm256_srli_epi16(t0, 8)), 8);

    __m256i t2 = _mm256_or_si256(t1, _mm256_slli_epi16(t1, 4));

    _mm256_store_si256((__m256i*)a[0].data(), t2);
}

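// Packs the quantized averages of the two half-blocks selected by idx (flip/diff bits)
// into the colour fields of the low 32 bits of the block word, as either 4-bit
// individual or 5+3-bit differential values.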
static etcpak_force_inline uint64_t EncodeAverages_AVX2( const v4i a[8], size_t idx ) noexcept
{
    uint64_t d = ( idx << 24 );
    size_t base = idx << 1;

    __m128i a0 = _mm_load_si128((const __m128i*)a[base].data());

    __m128i r0, r1;

    if( ( idx & 0x2 ) == 0 )
    {
        r0 = _mm_srli_epi16(a0, 4);

        __m128i a1 = _mm_unpackhi_epi64(r0, r0);
        r1 = _mm_slli_epi16(a1, 4);
    }
    else
    {
        __m128i a1 = _mm_and_si128(a0, _mm_set1_epi16(-8));

        r0 = _mm_unpackhi_epi64(a1, a1);
        __m128i a2 = _mm_sub_epi16(a1, r0);
        __m128i a3 = _mm_srai_epi16(a2, 3);
        r1 = _mm_and_si128(a3, _mm_set1_epi16(0x07));
    }

    __m128i r2 = _mm_or_si128(r0, r1);
    // do missing swap for average values
    __m128i r3 = _mm_shufflelo_epi16(r2, _MM_SHUFFLE(3, 0, 1, 2));
    __m128i r4 = _mm_packus_epi16(r3, _mm_setzero_si128());
    d |= _mm_cvtsi128_si32(r4);

    return d;
}

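// If all 16 pixels of the block are identical, returns the block encoded as a single
// flat colour (differential mode, zeroed tables and selectors); otherwise returns 0.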
static etcpak_force_inline uint64_t CheckSolid_AVX2( const uint8_t* src ) noexcept
{
    __m256i d0 = _mm256_loadu_si256(((__m256i*)src) + 0);
    __m256i d1 = _mm256_loadu_si256(((__m256i*)src) + 1);

    __m256i c = _mm256_broadcastd_epi32(_mm256_castsi256_si128(d0));

    __m256i c0 = _mm256_cmpeq_epi8(d0, c);
    __m256i c1 = _mm256_cmpeq_epi8(d1, c);

    __m256i m = _mm256_and_si256(c0, c1);

    if (!_mm256_testc_si256(m, _mm256_set1_epi32(-1)))
    {
        return 0;
    }

    return 0x02000000 |
        ( (unsigned int)( src[0] & 0xF8 ) << 16 ) |
        ( (unsigned int)( src[1] & 0xF8 ) << 8 ) |
        ( (unsigned int)( src[2] & 0xF8 ) );
}

static etcpak_force_inline __m128i PrepareAverages_AVX2( v4i a[8], const uint8_t* src) noexcept
{
    __m256i sum4 = Sum4_AVX2( src );

    ProcessAverages_AVX2(Average_AVX2( sum4 ), a );

    return CalcErrorBlock_AVX2( sum4, a);
}

static etcpak_force_inline __m128i PrepareAverages_AVX2( v4i a[8], const __m256i sum4) noexcept
{
    ProcessAverages_AVX2(Average_AVX2( sum4 ), a );

    return CalcErrorBlock_AVX2( sum4, a);
}

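// For each of the eight ETC1 intensity tables, accumulates the weighted squared error
// and packs the selector bits for the two half-blocks of the 4x2 split of the block.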
static etcpak_force_inline void FindBestFit_4x2_AVX2( uint32_t terr[2][8], uint32_t tsel[8], v4i a[8], const uint32_t offset, const uint8_t* data) noexcept
{
    __m256i sel0 = _mm256_setzero_si256();
    __m256i sel1 = _mm256_setzero_si256();

    for (unsigned int j = 0; j < 2; ++j)
    {
        unsigned int bid = offset + 1 - j;

        __m256i squareErrorSum = _mm256_setzero_si256();

        __m128i a0 = _mm_loadl_epi64((const __m128i*)a[bid].data());
        __m256i a1 = _mm256_broadcastq_epi64(a0);

        // Processing one full row each iteration
        for (size_t i = 0; i < 8; i += 4)
        {
            __m128i rgb = _mm_loadu_si128((const __m128i*)(data + i * 4));

            __m256i rgb16 = _mm256_cvtepu8_epi16(rgb);
            __m256i d = _mm256_sub_epi16(a1, rgb16);

            // The scaling values are divided by two and rounded, to allow the differences to be in the range of signed int16
            // This produces slightly different results, but is significantly faster
            __m256i pixel0 = _mm256_madd_epi16(d, _mm256_set_epi16(0, 38, 76, 14, 0, 38, 76, 14, 0, 38, 76, 14, 0, 38, 76, 14));
            __m256i pixel1 = _mm256_packs_epi32(pixel0, pixel0);
            __m256i pixel2 = _mm256_hadd_epi16(pixel1, pixel1);
            __m128i pixel3 = _mm256_castsi256_si128(pixel2);

            __m128i pix0 = _mm_broadcastw_epi16(pixel3);
            __m128i pix1 = _mm_broadcastw_epi16(_mm_srli_epi32(pixel3, 16));
            __m256i pixel = _mm256_insertf128_si256(_mm256_castsi128_si256(pix0), pix1, 1);

            // Processing first two pixels of the row
            {
                __m256i pix = _mm256_abs_epi16(pixel);

                // Taking the absolute value is way faster. The values are only used to sort, so the result will be the same.
                // Since the selector table is symmetrical, we need to calculate the difference only for half of the entries.
                __m256i error0 = _mm256_abs_epi16(_mm256_sub_epi16(pix, _mm256_broadcastsi128_si256(g_table128_SIMD[0])));
                __m256i error1 = _mm256_abs_epi16(_mm256_sub_epi16(pix, _mm256_broadcastsi128_si256(g_table128_SIMD[1])));

                __m256i minIndex0 = _mm256_and_si256(_mm256_cmpgt_epi16(error0, error1), _mm256_set1_epi16(1));
                __m256i minError = _mm256_min_epi16(error0, error1);

                // Exploiting the symmetry of the selector table and using the sign bit
                // This produces slightly different results, but is significantly faster
                __m256i minIndex1 = _mm256_srli_epi16(pixel, 15);

                // Interleaving values so madd instruction can be used
                __m256i minErrorLo = _mm256_permute4x64_epi64(minError, _MM_SHUFFLE(1, 1, 0, 0));
                __m256i minErrorHi = _mm256_permute4x64_epi64(minError, _MM_SHUFFLE(3, 3, 2, 2));

                __m256i minError2 = _mm256_unpacklo_epi16(minErrorLo, minErrorHi);
                // Squaring the minimum error to produce correct values when adding
                __m256i squareError = _mm256_madd_epi16(minError2, minError2);

                squareErrorSum = _mm256_add_epi32(squareErrorSum, squareError);

                // Packing selector bits
                __m256i minIndexLo2 = _mm256_sll_epi16(minIndex0, _mm_cvtsi64_si128(i + j * 8));
                __m256i minIndexHi2 = _mm256_sll_epi16(minIndex1, _mm_cvtsi64_si128(i + j * 8));

                sel0 = _mm256_or_si256(sel0, minIndexLo2);
                sel1 = _mm256_or_si256(sel1, minIndexHi2);
            }

            pixel3 = _mm256_extracti128_si256(pixel2, 1);
            pix0 = _mm_broadcastw_epi16(pixel3);
            pix1 = _mm_broadcastw_epi16(_mm_srli_epi32(pixel3, 16));
            pixel = _mm256_insertf128_si256(_mm256_castsi128_si256(pix0), pix1, 1);

            // Processing second two pixels of the row
            {
                __m256i pix = _mm256_abs_epi16(pixel);

                // Taking the absolute value is way faster. The values are only used to sort, so the result will be the same.
                // Since the selector table is symmetrical, we need to calculate the difference only for half of the entries.
                __m256i error0 = _mm256_abs_epi16(_mm256_sub_epi16(pix, _mm256_broadcastsi128_si256(g_table128_SIMD[0])));
                __m256i error1 = _mm256_abs_epi16(_mm256_sub_epi16(pix, _mm256_broadcastsi128_si256(g_table128_SIMD[1])));

                __m256i minIndex0 = _mm256_and_si256(_mm256_cmpgt_epi16(error0, error1), _mm256_set1_epi16(1));
                __m256i minError = _mm256_min_epi16(error0, error1);

                // Exploiting the symmetry of the selector table and using the sign bit
                __m256i minIndex1 = _mm256_srli_epi16(pixel, 15);

                // Interleaving values so madd instruction can be used
                __m256i minErrorLo = _mm256_permute4x64_epi64(minError, _MM_SHUFFLE(1, 1, 0, 0));
                __m256i minErrorHi = _mm256_permute4x64_epi64(minError, _MM_SHUFFLE(3, 3, 2, 2));

                __m256i minError2 = _mm256_unpacklo_epi16(minErrorLo, minErrorHi);
                // Squaring the minimum error to produce correct values when adding
                __m256i squareError = _mm256_madd_epi16(minError2, minError2);

                squareErrorSum = _mm256_add_epi32(squareErrorSum, squareError);

                // Packing selector bits
                __m256i minIndexLo2 = _mm256_sll_epi16(minIndex0, _mm_cvtsi64_si128(i + j * 8));
                __m256i minIndexHi2 = _mm256_sll_epi16(minIndex1, _mm_cvtsi64_si128(i + j * 8));
                __m256i minIndexLo3 = _mm256_slli_epi16(minIndexLo2, 2);
                __m256i minIndexHi3 = _mm256_slli_epi16(minIndexHi2, 2);

                sel0 = _mm256_or_si256(sel0, minIndexLo3);
                sel1 = _mm256_or_si256(sel1, minIndexHi3);
            }
        }

        data += 8 * 4;

        _mm256_store_si256((__m256i*)terr[1 - j], squareErrorSum);
    }

    // Interleave selector bits
    __m256i minIndexLo0 = _mm256_unpacklo_epi16(sel0, sel1);
    __m256i minIndexHi0 = _mm256_unpackhi_epi16(sel0, sel1);

    __m256i minIndexLo1 = _mm256_permute2x128_si256(minIndexLo0, minIndexHi0, (0) | (2 << 4));
    __m256i minIndexHi1 = _mm256_permute2x128_si256(minIndexLo0, minIndexHi0, (1) | (3 << 4));

    __m256i minIndexHi2 = _mm256_slli_epi32(minIndexHi1, 1);

    __m256i sel = _mm256_or_si256(minIndexLo1, minIndexHi2);

    _mm256_store_si256((__m256i*)tsel, sel);
}

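// Same as FindBestFit_4x2_AVX2, but for the two half-blocks of the 2x4 split.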
static etcpak_force_inline void FindBestFit_2x4_AVX2( uint32_t terr[2][8], uint32_t tsel[8], v4i a[8], const uint32_t offset, const uint8_t* data) noexcept
{
    __m256i sel0 = _mm256_setzero_si256();
    __m256i sel1 = _mm256_setzero_si256();

    __m256i squareErrorSum0 = _mm256_setzero_si256();
    __m256i squareErrorSum1 = _mm256_setzero_si256();

    __m128i a0 = _mm_loadl_epi64((const __m128i*)a[offset + 1].data());
    __m128i a1 = _mm_loadl_epi64((const __m128i*)a[offset + 0].data());

    __m128i a2 = _mm_broadcastq_epi64(a0);
    __m128i a3 = _mm_broadcastq_epi64(a1);
    __m256i a4 = _mm256_insertf128_si256(_mm256_castsi128_si256(a2), a3, 1);

    // Processing one full row each iteration
    for (size_t i = 0; i < 16; i += 4)
    {
        __m128i rgb = _mm_loadu_si128((const __m128i*)(data + i * 4));

        __m256i rgb16 = _mm256_cvtepu8_epi16(rgb);
        __m256i d = _mm256_sub_epi16(a4, rgb16);

        // The scaling values are divided by two and rounded, to allow the differences to be in the range of signed int16
        // This produces slightly different results, but is significantly faster
        __m256i pixel0 = _mm256_madd_epi16(d, _mm256_set_epi16(0, 38, 76, 14, 0, 38, 76, 14, 0, 38, 76, 14, 0, 38, 76, 14));
        __m256i pixel1 = _mm256_packs_epi32(pixel0, pixel0);
        __m256i pixel2 = _mm256_hadd_epi16(pixel1, pixel1);
        __m128i pixel3 = _mm256_castsi256_si128(pixel2);

        __m128i pix0 = _mm_broadcastw_epi16(pixel3);
        __m128i pix1 = _mm_broadcastw_epi16(_mm_srli_epi32(pixel3, 16));
        __m256i pixel = _mm256_insertf128_si256(_mm256_castsi128_si256(pix0), pix1, 1);

        // Processing first two pixels of the row
        {
            __m256i pix = _mm256_abs_epi16(pixel);

            // Taking the absolute value is way faster. The values are only used to sort, so the result will be the same.
            // Since the selector table is symmetrical, we need to calculate the difference only for half of the entries.
            __m256i error0 = _mm256_abs_epi16(_mm256_sub_epi16(pix, _mm256_broadcastsi128_si256(g_table128_SIMD[0])));
            __m256i error1 = _mm256_abs_epi16(_mm256_sub_epi16(pix, _mm256_broadcastsi128_si256(g_table128_SIMD[1])));

            __m256i minIndex0 = _mm256_and_si256(_mm256_cmpgt_epi16(error0, error1), _mm256_set1_epi16(1));
            __m256i minError = _mm256_min_epi16(error0, error1);

            // Exploiting the symmetry of the selector table and using the sign bit
            __m256i minIndex1 = _mm256_srli_epi16(pixel, 15);

            // Interleaving values so madd instruction can be used
            __m256i minErrorLo = _mm256_permute4x64_epi64(minError, _MM_SHUFFLE(1, 1, 0, 0));
            __m256i minErrorHi = _mm256_permute4x64_epi64(minError, _MM_SHUFFLE(3, 3, 2, 2));

            __m256i minError2 = _mm256_unpacklo_epi16(minErrorLo, minErrorHi);
            // Squaring the minimum error to produce correct values when adding
            __m256i squareError = _mm256_madd_epi16(minError2, minError2);

            squareErrorSum0 = _mm256_add_epi32(squareErrorSum0, squareError);

            // Packing selector bits
            __m256i minIndexLo2 = _mm256_sll_epi16(minIndex0, _mm_cvtsi64_si128(i));
            __m256i minIndexHi2 = _mm256_sll_epi16(minIndex1, _mm_cvtsi64_si128(i));

            sel0 = _mm256_or_si256(sel0, minIndexLo2);
            sel1 = _mm256_or_si256(sel1, minIndexHi2);
        }

        pixel3 = _mm256_extracti128_si256(pixel2, 1);
        pix0 = _mm_broadcastw_epi16(pixel3);
        pix1 = _mm_broadcastw_epi16(_mm_srli_epi32(pixel3, 16));
        pixel = _mm256_insertf128_si256(_mm256_castsi128_si256(pix0), pix1, 1);

        // Processing second two pixels of the row
        {
            __m256i pix = _mm256_abs_epi16(pixel);

            // Taking the absolute value is way faster. The values are only used to sort, so the result will be the same.
            // Since the selector table is symmetrical, we need to calculate the difference only for half of the entries.
            __m256i error0 = _mm256_abs_epi16(_mm256_sub_epi16(pix, _mm256_broadcastsi128_si256(g_table128_SIMD[0])));
            __m256i error1 = _mm256_abs_epi16(_mm256_sub_epi16(pix, _mm256_broadcastsi128_si256(g_table128_SIMD[1])));

            __m256i minIndex0 = _mm256_and_si256(_mm256_cmpgt_epi16(error0, error1), _mm256_set1_epi16(1));
            __m256i minError = _mm256_min_epi16(error0, error1);

            // Exploiting the symmetry of the selector table and using the sign bit
            __m256i minIndex1 = _mm256_srli_epi16(pixel, 15);

            // Interleaving values so madd instruction can be used
            __m256i minErrorLo = _mm256_permute4x64_epi64(minError, _MM_SHUFFLE(1, 1, 0, 0));
            __m256i minErrorHi = _mm256_permute4x64_epi64(minError, _MM_SHUFFLE(3, 3, 2, 2));

            __m256i minError2 = _mm256_unpacklo_epi16(minErrorLo, minErrorHi);
            // Squaring the minimum error to produce correct values when adding
            __m256i squareError = _mm256_madd_epi16(minError2, minError2);

            squareErrorSum1 = _mm256_add_epi32(squareErrorSum1, squareError);

            // Packing selector bits
            __m256i minIndexLo2 = _mm256_sll_epi16(minIndex0, _mm_cvtsi64_si128(i));
            __m256i minIndexHi2 = _mm256_sll_epi16(minIndex1, _mm_cvtsi64_si128(i));
            __m256i minIndexLo3 = _mm256_slli_epi16(minIndexLo2, 2);
            __m256i minIndexHi3 = _mm256_slli_epi16(minIndexHi2, 2);

            sel0 = _mm256_or_si256(sel0, minIndexLo3);
            sel1 = _mm256_or_si256(sel1, minIndexHi3);
        }
    }

    _mm256_store_si256((__m256i*)terr[1], squareErrorSum0);
    _mm256_store_si256((__m256i*)terr[0], squareErrorSum1);

    // Interleave selector bits
    __m256i minIndexLo0 = _mm256_unpacklo_epi16(sel0, sel1);
    __m256i minIndexHi0 = _mm256_unpackhi_epi16(sel0, sel1);

    __m256i minIndexLo1 = _mm256_permute2x128_si256(minIndexLo0, minIndexHi0, (0) | (2 << 4));
    __m256i minIndexHi1 = _mm256_permute2x128_si256(minIndexLo0, minIndexHi0, (1) | (3 << 4));

    __m256i minIndexHi2 = _mm256_slli_epi32(minIndexHi1, 1);

    __m256i sel = _mm256_or_si256(minIndexLo1, minIndexHi2);

    _mm256_store_si256((__m256i*)tsel, sel);
}

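// Picks, for each half-block, the intensity table with the smallest accumulated error
// and packs the table indices plus the matching selector bits into the block word.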
static etcpak_force_inline uint64_t EncodeSelectors_AVX2( uint64_t d, const uint32_t terr[2][8], const uint32_t tsel[8], const bool rotate) noexcept
{
    size_t tidx[2];

    // Get index of minimum error (terr[0] and terr[1])
    __m256i err0 = _mm256_load_si256((const __m256i*)terr[0]);
    __m256i err1 = _mm256_load_si256((const __m256i*)terr[1]);

    __m256i errLo = _mm256_permute2x128_si256(err0, err1, (0) | (2 << 4));
    __m256i errHi = _mm256_permute2x128_si256(err0, err1, (1) | (3 << 4));

    __m256i errMin0 = _mm256_min_epu32(errLo, errHi);

    __m256i errMin1 = _mm256_shuffle_epi32(errMin0, _MM_SHUFFLE(2, 3, 0, 1));
    __m256i errMin2 = _mm256_min_epu32(errMin0, errMin1);

    __m256i errMin3 = _mm256_shuffle_epi32(errMin2, _MM_SHUFFLE(1, 0, 3, 2));
    __m256i errMin4 = _mm256_min_epu32(errMin3, errMin2);

    __m256i errMin5 = _mm256_permute2x128_si256(errMin4, errMin4, (0) | (0 << 4));
    __m256i errMin6 = _mm256_permute2x128_si256(errMin4, errMin4, (1) | (1 << 4));

    __m256i errMask0 = _mm256_cmpeq_epi32(errMin5, err0);
    __m256i errMask1 = _mm256_cmpeq_epi32(errMin6, err1);

    uint32_t mask0 = _mm256_movemask_epi8(errMask0);
    uint32_t mask1 = _mm256_movemask_epi8(errMask1);

    tidx[0] = _bit_scan_forward(mask0) >> 2;
    tidx[1] = _bit_scan_forward(mask1) >> 2;

    d |= tidx[0] << 26;
    d |= tidx[1] << 29;

    unsigned int t0 = tsel[tidx[0]];
    unsigned int t1 = tsel[tidx[1]];

    if (!rotate)
    {
        t0 &= 0xFF00FF00;
        t1 &= 0x00FF00FF;
    }
    else
    {
        t0 &= 0xCCCCCCCC;
        t1 &= 0x33333333;
    }

    // Flip selectors from sign bit
    unsigned int t2 = (t0 | t1) ^ 0xFFFF0000;

    return d | static_cast<uint64_t>(_bswap(t2)) << 32;
}

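// Converts the three planar base colours (O, H, V) from the scaled working range to the
// 6/7/6-bit R/G/B precision used by ETC2 planar mode (with rounding), packed for the
// encoding step that follows.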
static etcpak_force_inline __m128i r6g7b6_AVX2(__m128 cof, __m128 chf, __m128 cvf) noexcept
{
    __m128i co = _mm_cvttps_epi32(cof);
    __m128i ch = _mm_cvttps_epi32(chf);
    __m128i cv = _mm_cvttps_epi32(cvf);

    __m128i coh = _mm_packus_epi32(co, ch);
    __m128i cv0 = _mm_packus_epi32(cv, _mm_setzero_si128());

    __m256i cohv0 = _mm256_inserti128_si256(_mm256_castsi128_si256(coh), cv0, 1);
    __m256i cohv1 = _mm256_min_epu16(cohv0, _mm256_set1_epi16(1023));

    __m256i cohv2 = _mm256_sub_epi16(cohv1, _mm256_set1_epi16(15));
    __m256i cohv3 = _mm256_srai_epi16(cohv2, 1);

    __m256i cohvrb0 = _mm256_add_epi16(cohv3, _mm256_set1_epi16(11));
    __m256i cohvrb1 = _mm256_add_epi16(cohv3, _mm256_set1_epi16(4));
    __m256i cohvg0 = _mm256_add_epi16(cohv3, _mm256_set1_epi16(9));
    __m256i cohvg1 = _mm256_add_epi16(cohv3, _mm256_set1_epi16(6));

    __m256i cohvrb2 = _mm256_srai_epi16(cohvrb0, 7);
    __m256i cohvrb3 = _mm256_srai_epi16(cohvrb1, 7);
    __m256i cohvg2 = _mm256_srai_epi16(cohvg0, 8);
    __m256i cohvg3 = _mm256_srai_epi16(cohvg1, 8);

    __m256i cohvrb4 = _mm256_sub_epi16(cohvrb0, cohvrb2);
    __m256i cohvrb5 = _mm256_sub_epi16(cohvrb4, cohvrb3);
    __m256i cohvg4 = _mm256_sub_epi16(cohvg0, cohvg2);
    __m256i cohvg5 = _mm256_sub_epi16(cohvg4, cohvg3);

    __m256i cohvrb6 = _mm256_srai_epi16(cohvrb5, 3);
    __m256i cohvg6 = _mm256_srai_epi16(cohvg5, 2);

    __m256i cohv4 = _mm256_blend_epi16(cohvg6, cohvrb6, 0x55);

    __m128i cohv5 = _mm_packus_epi16(_mm256_castsi256_si128(cohv4), _mm256_extracti128_si256(cohv4, 1));
    return _mm_shuffle_epi8(cohv5, _mm_setr_epi8(6, 5, 4, -1, 2, 1, 0, -1, 10, 9, 8, -1, -1, -1, -1, -1));
}

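// ETC2 planar-mode fit: derives the O, H and V base colours from a least-squares fit of
// a colour gradient over the block, encodes them, and computes the weighted block error
// when heuristics are disabled. The half-block channel sums are also returned in sum4.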
static etcpak_force_inline Plane Planar_AVX2( const Channels& ch, uint8_t& mode, bool useHeuristics )
{
    __m128i t0 = _mm_sad_epu8( ch.r8, _mm_setzero_si128() );
    __m128i t1 = _mm_sad_epu8( ch.g8, _mm_setzero_si128() );
    __m128i t2 = _mm_sad_epu8( ch.b8, _mm_setzero_si128() );

    __m128i r8s = _mm_shuffle_epi8( ch.r8, _mm_set_epi8( 0xF, 0xE, 0xB, 0xA, 0x7, 0x6, 0x3, 0x2, 0xD, 0xC, 0x9, 0x8, 0x5, 0x4, 0x1, 0x0 ) );
    __m128i g8s = _mm_shuffle_epi8( ch.g8, _mm_set_epi8( 0xF, 0xE, 0xB, 0xA, 0x7, 0x6, 0x3, 0x2, 0xD, 0xC, 0x9, 0x8, 0x5, 0x4, 0x1, 0x0 ) );
    __m128i b8s = _mm_shuffle_epi8( ch.b8, _mm_set_epi8( 0xF, 0xE, 0xB, 0xA, 0x7, 0x6, 0x3, 0x2, 0xD, 0xC, 0x9, 0x8, 0x5, 0x4, 0x1, 0x0 ) );

    __m128i s0 = _mm_sad_epu8( r8s, _mm_setzero_si128() );
    __m128i s1 = _mm_sad_epu8( g8s, _mm_setzero_si128() );
    __m128i s2 = _mm_sad_epu8( b8s, _mm_setzero_si128() );

    __m256i sr0 = _mm256_insertf128_si256( _mm256_castsi128_si256( t0 ), s0, 1 );
    __m256i sg0 = _mm256_insertf128_si256( _mm256_castsi128_si256( t1 ), s1, 1 );
    __m256i sb0 = _mm256_insertf128_si256( _mm256_castsi128_si256( t2 ), s2, 1 );

    __m256i sr1 = _mm256_slli_epi64( sr0, 32 );
    __m256i sg1 = _mm256_slli_epi64( sg0, 16 );

    __m256i srb = _mm256_or_si256( sr1, sb0 );
    __m256i srgb = _mm256_or_si256( srb, sg1 );

    if( mode != ModePlanar && useHeuristics )
    {
        Plane plane;
        plane.sum4 = _mm256_permute4x64_epi64( srgb, _MM_SHUFFLE( 2, 3, 0, 1 ) );
        return plane;
    }

    __m128i t3 = _mm_castps_si128( _mm_shuffle_ps( _mm_castsi128_ps( t0 ), _mm_castsi128_ps( t1 ), _MM_SHUFFLE( 2, 0, 2, 0 ) ) );
    __m128i t4 = _mm_shuffle_epi32( t2, _MM_SHUFFLE( 3, 1, 2, 0 ) );
    __m128i t5 = _mm_hadd_epi32( t3, t4 );
    __m128i t6 = _mm_shuffle_epi32( t5, _MM_SHUFFLE( 1, 1, 1, 1 ) );
    __m128i t7 = _mm_shuffle_epi32( t5, _MM_SHUFFLE( 2, 2, 2, 2 ) );

    __m256i sr = _mm256_broadcastw_epi16( t5 );
    __m256i sg = _mm256_broadcastw_epi16( t6 );
    __m256i sb = _mm256_broadcastw_epi16( t7 );

    __m256i r08 = _mm256_cvtepu8_epi16( ch.r8 );
    __m256i g08 = _mm256_cvtepu8_epi16( ch.g8 );
    __m256i b08 = _mm256_cvtepu8_epi16( ch.b8 );

    __m256i r16 = _mm256_slli_epi16( r08, 4 );
    __m256i g16 = _mm256_slli_epi16( g08, 4 );
    __m256i b16 = _mm256_slli_epi16( b08, 4 );

    __m256i difR0 = _mm256_sub_epi16( r16, sr );
    __m256i difG0 = _mm256_sub_epi16( g16, sg );
    __m256i difB0 = _mm256_sub_epi16( b16, sb );

    __m256i difRyz = _mm256_madd_epi16( difR0, _mm256_set_epi16( 255, 85, -85, -255, 255, 85, -85, -255, 255, 85, -85, -255, 255, 85, -85, -255 ) );
    __m256i difGyz = _mm256_madd_epi16( difG0, _mm256_set_epi16( 255, 85, -85, -255, 255, 85, -85, -255, 255, 85, -85, -255, 255, 85, -85, -255 ) );
    __m256i difByz = _mm256_madd_epi16( difB0, _mm256_set_epi16( 255, 85, -85, -255, 255, 85, -85, -255, 255, 85, -85, -255, 255, 85, -85, -255 ) );

    __m256i difRxz = _mm256_madd_epi16( difR0, _mm256_set_epi16( 255, 255, 255, 255, 85, 85, 85, 85, -85, -85, -85, -85, -255, -255, -255, -255 ) );
    __m256i difGxz = _mm256_madd_epi16( difG0, _mm256_set_epi16( 255, 255, 255, 255, 85, 85, 85, 85, -85, -85, -85, -85, -255, -255, -255, -255 ) );
    __m256i difBxz = _mm256_madd_epi16( difB0, _mm256_set_epi16( 255, 255, 255, 255, 85, 85, 85, 85, -85, -85, -85, -85, -255, -255, -255, -255 ) );

    __m256i difRGyz = _mm256_hadd_epi32( difRyz, difGyz );
    __m256i difByzxz = _mm256_hadd_epi32( difByz, difBxz );

    __m256i difRGxz = _mm256_hadd_epi32( difRxz, difGxz );

    __m128i sumRGyz = _mm_add_epi32( _mm256_castsi256_si128( difRGyz ), _mm256_extracti128_si256( difRGyz, 1 ) );
    __m128i sumByzxz = _mm_add_epi32( _mm256_castsi256_si128( difByzxz ), _mm256_extracti128_si256( difByzxz, 1 ) );
    __m128i sumRGxz = _mm_add_epi32( _mm256_castsi256_si128( difRGxz ), _mm256_extracti128_si256( difRGxz, 1 ) );

    __m128i sumRGByz = _mm_hadd_epi32( sumRGyz, sumByzxz );
    __m128i sumRGByzxz = _mm_hadd_epi32( sumRGxz, sumByzxz );

    __m128i sumRGBxz = _mm_shuffle_epi32( sumRGByzxz, _MM_SHUFFLE( 2, 3, 1, 0 ) );

    __m128 sumRGByzf = _mm_cvtepi32_ps( sumRGByz );
    __m128 sumRGBxzf = _mm_cvtepi32_ps( sumRGBxz );

    const float value = ( 255 * 255 * 8.0f + 85 * 85 * 8.0f ) * 16.0f;

    __m128 scale = _mm_set1_ps( -4.0f / value );

    __m128 af = _mm_mul_ps( sumRGBxzf, scale );
    __m128 bf = _mm_mul_ps( sumRGByzf, scale );

    __m128 df = _mm_mul_ps( _mm_cvtepi32_ps( t5 ), _mm_set1_ps( 4.0f / 16.0f ) );

    // calculating the three colors RGBO, RGBH, and RGBV.  RGB = df - af * x - bf * y;
    __m128 cof0 = _mm_fnmadd_ps( af, _mm_set1_ps( -255.0f ), _mm_fnmadd_ps( bf, _mm_set1_ps( -255.0f ), df ) );
    __m128 chf0 = _mm_fnmadd_ps( af, _mm_set1_ps( 425.0f ), _mm_fnmadd_ps( bf, _mm_set1_ps( -255.0f ), df ) );
    __m128 cvf0 = _mm_fnmadd_ps( af, _mm_set1_ps( -255.0f ), _mm_fnmadd_ps( bf, _mm_set1_ps( 425.0f ), df ) );

    // convert to r6g7b6
    __m128i cohv = r6g7b6_AVX2( cof0, chf0, cvf0 );

    uint64_t rgbho = _mm_extract_epi64( cohv, 0 );
    uint32_t rgbv0 = _mm_extract_epi32( cohv, 2 );

    // Error calculation
    uint64_t error = 0;
    if( !useHeuristics )
    {
        auto ro0 = ( rgbho >> 48 ) & 0x3F;
        auto go0 = ( rgbho >> 40 ) & 0x7F;
        auto bo0 = ( rgbho >> 32 ) & 0x3F;
        auto ro1 = ( ro0 >> 4 ) | ( ro0 << 2 );
        auto go1 = ( go0 >> 6 ) | ( go0 << 1 );
        auto bo1 = ( bo0 >> 4 ) | ( bo0 << 2 );
        auto ro2 = ( ro1 << 2 ) + 2;
        auto go2 = ( go1 << 2 ) + 2;
        auto bo2 = ( bo1 << 2 ) + 2;

        __m256i ro3 = _mm256_set1_epi16( ro2 );
        __m256i go3 = _mm256_set1_epi16( go2 );
        __m256i bo3 = _mm256_set1_epi16( bo2 );

        auto rh0 = ( rgbho >> 16 ) & 0x3F;
        auto gh0 = ( rgbho >> 8 ) & 0x7F;
        auto bh0 = ( rgbho >> 0 ) & 0x3F;
        auto rh1 = ( rh0 >> 4 ) | ( rh0 << 2 );
        auto gh1 = ( gh0 >> 6 ) | ( gh0 << 1 );
        auto bh1 = ( bh0 >> 4 ) | ( bh0 << 2 );

        auto rh2 = rh1 - ro1;
        auto gh2 = gh1 - go1;
        auto bh2 = bh1 - bo1;

        __m256i rh3 = _mm256_set1_epi16( rh2 );
        __m256i gh3 = _mm256_set1_epi16( gh2 );
        __m256i bh3 = _mm256_set1_epi16( bh2 );

        auto rv0 = ( rgbv0 >> 16 ) & 0x3F;
        auto gv0 = ( rgbv0 >> 8 ) & 0x7F;
        auto bv0 = ( rgbv0 >> 0 ) & 0x3F;
        auto rv1 = ( rv0 >> 4 ) | ( rv0 << 2 );
        auto gv1 = ( gv0 >> 6 ) | ( gv0 << 1 );
        auto bv1 = ( bv0 >> 4 ) | ( bv0 << 2 );

        auto rv2 = rv1 - ro1;
        auto gv2 = gv1 - go1;
        auto bv2 = bv1 - bo1;

        __m256i rv3 = _mm256_set1_epi16( rv2 );
        __m256i gv3 = _mm256_set1_epi16( gv2 );
        __m256i bv3 = _mm256_set1_epi16( bv2 );

        __m256i x = _mm256_set_epi16( 3, 3, 3, 3, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0 );

        __m256i rh4 = _mm256_mullo_epi16( rh3, x );
        __m256i gh4 = _mm256_mullo_epi16( gh3, x );
        __m256i bh4 = _mm256_mullo_epi16( bh3, x );

        __m256i y = _mm256_set_epi16( 3, 2, 1, 0, 3, 2, 1, 0, 3, 2, 1, 0, 3, 2, 1, 0 );

        __m256i rv4 = _mm256_mullo_epi16( rv3, y );
        __m256i gv4 = _mm256_mullo_epi16( gv3, y );
        __m256i bv4 = _mm256_mullo_epi16( bv3, y );

        __m256i rxy = _mm256_add_epi16( rh4, rv4 );
        __m256i gxy = _mm256_add_epi16( gh4, gv4 );
        __m256i bxy = _mm256_add_epi16( bh4, bv4 );

        __m256i rp0 = _mm256_add_epi16( rxy, ro3 );
        __m256i gp0 = _mm256_add_epi16( gxy, go3 );
        __m256i bp0 = _mm256_add_epi16( bxy, bo3 );

        __m256i rp1 = _mm256_srai_epi16( rp0, 2 );
        __m256i gp1 = _mm256_srai_epi16( gp0, 2 );
        __m256i bp1 = _mm256_srai_epi16( bp0, 2 );

        __m256i rp2 = _mm256_max_epi16( _mm256_min_epi16( rp1, _mm256_set1_epi16( 255 ) ), _mm256_setzero_si256() );
        __m256i gp2 = _mm256_max_epi16( _mm256_min_epi16( gp1, _mm256_set1_epi16( 255 ) ), _mm256_setzero_si256() );
        __m256i bp2 = _mm256_max_epi16( _mm256_min_epi16( bp1, _mm256_set1_epi16( 255 ) ), _mm256_setzero_si256() );

        __m256i rdif = _mm256_sub_epi16( r08, rp2 );
        __m256i gdif = _mm256_sub_epi16( g08, gp2 );
        __m256i bdif = _mm256_sub_epi16( b08, bp2 );

        __m256i rerr = _mm256_mullo_epi16( rdif, _mm256_set1_epi16( 38 ) );
        __m256i gerr = _mm256_mullo_epi16( gdif, _mm256_set1_epi16( 76 ) );
        __m256i berr = _mm256_mullo_epi16( bdif, _mm256_set1_epi16( 14 ) );

        __m256i sum0 = _mm256_add_epi16( rerr, gerr );
        __m256i sum1 = _mm256_add_epi16( sum0, berr );

        __m256i sum2 = _mm256_madd_epi16( sum1, sum1 );

        __m128i sum3 = _mm_add_epi32( _mm256_castsi256_si128( sum2 ), _mm256_extracti128_si256( sum2, 1 ) );

        uint32_t err0 = _mm_extract_epi32( sum3, 0 );
        uint32_t err1 = _mm_extract_epi32( sum3, 1 );
        uint32_t err2 = _mm_extract_epi32( sum3, 2 );
        uint32_t err3 = _mm_extract_epi32( sum3, 3 );

        error = err0 + err1 + err2 + err3;
    }
    /**/

    uint32_t rgbv = ( rgbv0 & 0x3F ) | ( ( rgbv0 >> 2 ) & 0x1FC0 ) | ( ( rgbv0 >> 3 ) & 0x7E000 );
    uint64_t rgbho0_ = ( rgbho & 0x3F0000003F ) | ( ( rgbho >> 2 ) & 0x1FC000001FC0 ) | ( ( rgbho >> 3 ) & 0x7E0000007E000 );
    uint64_t rgbho0 = ( rgbho0_ & 0x7FFFF ) | ( ( rgbho0_ >> 13 ) & 0x3FFFF80000 );

    uint32_t hi = rgbv | ((rgbho0 & 0x1FFF) << 19);
    rgbho0 >>= 13;
    uint32_t lo = ( rgbho0 & 0x1 ) | ( ( rgbho0 & 0x1FE ) << 1 ) | ( ( rgbho0 & 0x600 ) << 2 ) | ( ( rgbho0 & 0x3F800 ) << 5 ) | ( ( rgbho0 & 0x1FC0000 ) << 6 );

    uint32_t idx = ( ( rgbho >> 33 ) & 0xF ) | ( ( rgbho >> 41 ) & 0x10 ) | ( ( rgbho >> 48 ) & 0x20 );
    lo |= g_flags[idx];
    uint64_t result = static_cast<uint32_t>(_bswap(lo));
    result |= static_cast<uint64_t>(static_cast<uint32_t>(_bswap(hi))) << 32;

    Plane plane;

    plane.plane = result;
    if( useHeuristics )
    {
        plane.error = 0;
        mode = ModePlanar;
    }
    else
    {
        plane.error = error;
    }
    plane.sum4 = _mm256_permute4x64_epi64(srgb, _MM_SHUFFLE(2, 3, 0, 1));

    return plane;
}

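// Same as the overload above, but compares the best achievable error against the
// precomputed `error` and returns the already-encoded `value` unchanged when the
// combined minimum error is not lower.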
static etcpak_force_inline uint64_t EncodeSelectors_AVX2( uint64_t d, const uint32_t terr[2][8], const uint32_t tsel[8], const bool rotate, const uint64_t value, const uint32_t error) noexcept
{
    size_t tidx[2];

    // Get index of minimum error (terr[0] and terr[1])
    __m256i err0 = _mm256_load_si256((const __m256i*)terr[0]);
    __m256i err1 = _mm256_load_si256((const __m256i*)terr[1]);

    __m256i errLo = _mm256_permute2x128_si256(err0, err1, (0) | (2 << 4));
    __m256i errHi = _mm256_permute2x128_si256(err0, err1, (1) | (3 << 4));

    __m256i errMin0 = _mm256_min_epu32(errLo, errHi);

    __m256i errMin1 = _mm256_shuffle_epi32(errMin0, _MM_SHUFFLE(2, 3, 0, 1));
    __m256i errMin2 = _mm256_min_epu32(errMin0, errMin1);

    __m256i errMin3 = _mm256_shuffle_epi32(errMin2, _MM_SHUFFLE(1, 0, 3, 2));
    __m256i errMin4 = _mm256_min_epu32(errMin3, errMin2);

    __m256i errMin5 = _mm256_permute2x128_si256(errMin4, errMin4, (0) | (0 << 4));
    __m256i errMin6 = _mm256_permute2x128_si256(errMin4, errMin4, (1) | (1 << 4));

    __m256i errMask0 = _mm256_cmpeq_epi32(errMin5, err0);
    __m256i errMask1 = _mm256_cmpeq_epi32(errMin6, err1);

    uint32_t mask0 = _mm256_movemask_epi8(errMask0);
    uint32_t mask1 = _mm256_movemask_epi8(errMask1);

    tidx[0] = _bit_scan_forward(mask0) >> 2;
    tidx[1] = _bit_scan_forward(mask1) >> 2;

    if ((terr[0][tidx[0]] + terr[1][tidx[1]]) >= error)
    {
        return value;
    }

    d |= tidx[0] << 26;
    d |= tidx[1] << 29;

    unsigned int t0 = tsel[tidx[0]];
    unsigned int t1 = tsel[tidx[1]];

    if (!rotate)
    {
        t0 &= 0xFF00FF00;
        t1 &= 0x00FF00FF;
    }
    else
    {
        t0 &= 0xCCCCCCCC;
        t1 &= 0x33333333;
    }

    // Flip selectors from sign bit
    unsigned int t2 = (t0 | t1) ^ 0xFFFF0000;

    return d | static_cast<uint64_t>(_bswap(t2)) << 32;
}

#endif

static etcpak_force_inline void Average( const uint8_t* data, v4i* a )
{}

static etcpak_force_inline void CalcErrorBlock( const uint8_t* data, unsigned int err[4][4] )
{}

static etcpak_force_inline unsigned int CalcError( const unsigned int block[4], const v4i& average )
{}

static etcpak_force_inline void ProcessAverages( v4i* a )
{}

static etcpak_force_inline void EncodeAverages( uint64_t& _d, const v4i* a, size_t idx )
{}

static etcpak_force_inline uint64_t CheckSolid( const uint8_t* src )
{}

static etcpak_force_inline void PrepareAverages( v4i a[8], const uint8_t* src, unsigned int err[4] )
{}

static etcpak_force_inline void FindBestFit( uint64_t terr[2][8], uint16_t tsel[16][8], v4i a[8], const uint32_t* id, const uint8_t* data )
{}

#if defined __SSE4_1__ || defined __ARM_NEON
// Non-reference implementation, but faster. Produces the same results as the AVX2 version
static etcpak_force_inline void FindBestFit( uint32_t terr[2][8], uint16_t tsel[16][8], v4i a[8], const uint32_t* id, const uint8_t* data )
{
    for( size_t i=0; i<16; i++ )
    {
        uint16_t* sel = tsel[i];
        unsigned int bid = id[i];
        uint32_t* ter = terr[bid%2];

        uint8_t b = *data++;
        uint8_t g = *data++;
        uint8_t r = *data++;
        data++;

        int dr = a[bid][0] - r;
        int dg = a[bid][1] - g;
        int db = a[bid][2] - b;

#ifdef __SSE4_1__
        // The scaling values are divided by two and rounded, to allow the differences to be in the range of signed int16
        // This produces slightly different results, but is significantly faster
        __m128i pixel = _mm_set1_epi16(dr * 38 + dg * 76 + db * 14);
        __m128i pix = _mm_abs_epi16(pixel);

        // Taking the absolute value is way faster. The values are only used to sort, so the result will be the same.
        // Since the selector table is symmetrical, we need to calculate the difference only for half of the entries.
        __m128i error0 = _mm_abs_epi16(_mm_sub_epi16(pix, g_table128_SIMD[0]));
        __m128i error1 = _mm_abs_epi16(_mm_sub_epi16(pix, g_table128_SIMD[1]));

        __m128i index = _mm_and_si128(_mm_cmplt_epi16(error1, error0), _mm_set1_epi16(1));
        __m128i minError = _mm_min_epi16(error0, error1);

        // Exploiting the symmetry of the selector table and using the sign bit
        // This produces slightly different results, but is needed to produce the same results as the AVX2 implementation
        __m128i indexBit = _mm_andnot_si128(_mm_srli_epi16(pixel, 15), _mm_set1_epi8(-1));
        __m128i minIndex = _mm_or_si128(index, _mm_add_epi16(indexBit, indexBit));

        // Squaring the minimum error to produce correct values when adding
        __m128i squareErrorLo = _mm_mullo_epi16(minError, minError);
        __m128i squareErrorHi = _mm_mulhi_epi16(minError, minError);

        __m128i squareErrorLow = _mm_unpacklo_epi16(squareErrorLo, squareErrorHi);
        __m128i squareErrorHigh = _mm_unpackhi_epi16(squareErrorLo, squareErrorHi);

        squareErrorLow = _mm_add_epi32(squareErrorLow, _mm_loadu_si128(((__m128i*)ter) + 0));
        _mm_storeu_si128(((__m128i*)ter) + 0, squareErrorLow);
        squareErrorHigh = _mm_add_epi32(squareErrorHigh, _mm_loadu_si128(((__m128i*)ter) + 1));
        _mm_storeu_si128(((__m128i*)ter) + 1, squareErrorHigh);

        _mm_storeu_si128((__m128i*)sel, minIndex);
#elif defined __ARM_NEON
        int16x8_t pixel = vdupq_n_s16( dr * 38 + dg * 76 + db * 14 );
        int16x8_t pix = vabsq_s16( pixel );

        int16x8_t error0 = vabsq_s16( vsubq_s16( pix, g_table128_NEON[0] ) );
        int16x8_t error1 = vabsq_s16( vsubq_s16( pix, g_table128_NEON[1] ) );

        int16x8_t index = vandq_s16( vreinterpretq_s16_u16( vcltq_s16( error1, error0 ) ), vdupq_n_s16( 1 ) );
        int16x8_t minError = vminq_s16( error0, error1 );

        int16x8_t indexBit = vandq_s16( vmvnq_s16( vshrq_n_s16( pixel, 15 ) ), vdupq_n_s16( -1 ) );
        int16x8_t minIndex = vorrq_s16( index, vaddq_s16( indexBit, indexBit ) );

        int16x4_t minErrorLow = vget_low_s16( minError );
        int16x4_t minErrorHigh = vget_high_s16( minError );

        int32x4_t squareErrorLow = vmull_s16( minErrorLow, minErrorLow );
        int32x4_t squareErrorHigh = vmull_s16( minErrorHigh, minErrorHigh );

        int32x4_t squareErrorSumLow = vaddq_s32( squareErrorLow, vld1q_s32( (int32_t*)ter ) );
        int32x4_t squareErrorSumHigh = vaddq_s32( squareErrorHigh, vld1q_s32( (int32_t*)ter + 4 ) );

        vst1q_s32( (int32_t*)ter, squareErrorSumLow );
        vst1q_s32( (int32_t*)ter + 4, squareErrorSumHigh );

        vst1q_s16( (int16_t*)sel, minIndex );
#endif
    }
}
#endif

static etcpak_force_inline uint8_t convert6(float f)
{}

static etcpak_force_inline uint8_t convert7(float f)
{}

static etcpak_force_inline std::pair<uint64_t, uint64_t> Planar( const uint8_t* src, const uint8_t mode, bool useHeuristics )
{}

#ifdef __ARM_NEON

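// Planar_NEON_DifXZ / Planar_NEON_DifYZ: weighted reductions of the channel differences
// along the two block axes, feeding the planar least-squares fit in Planar_NEON below.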
static etcpak_force_inline int32x2_t Planar_NEON_DifXZ( int16x8_t dif_lo, int16x8_t dif_hi )
{
    int32x4_t dif0 = vmull_n_s16( vget_low_s16( dif_lo ), -255 );
    int32x4_t dif1 = vmull_n_s16( vget_high_s16( dif_lo ), -85 );
    int32x4_t dif2 = vmull_n_s16( vget_low_s16( dif_hi ), 85 );
    int32x4_t dif3 = vmull_n_s16( vget_high_s16( dif_hi ), 255 );
    int32x4_t dif4 = vaddq_s32( vaddq_s32( dif0, dif1 ), vaddq_s32( dif2, dif3 ) );

#ifndef __aarch64__
    int32x2_t dif5 = vpadd_s32( vget_low_s32( dif4 ), vget_high_s32( dif4 ) );
    return vpadd_s32( dif5, dif5 );
#else
    return vdup_n_s32( vaddvq_s32( dif4 ) );
#endif
}

static etcpak_force_inline int32x2_t Planar_NEON_DifYZ( int16x8_t dif_lo, int16x8_t dif_hi )
{
    int16x4_t scaling = { -255, -85, 85, 255 };
    int32x4_t dif0 = vmull_s16( vget_low_s16( dif_lo ), scaling );
    int32x4_t dif1 = vmull_s16( vget_high_s16( dif_lo ), scaling );
    int32x4_t dif2 = vmull_s16( vget_low_s16( dif_hi ), scaling );
    int32x4_t dif3 = vmull_s16( vget_high_s16( dif_hi ), scaling );
    int32x4_t dif4 = vaddq_s32( vaddq_s32( dif0, dif1 ), vaddq_s32( dif2, dif3 ) );

#ifndef __aarch64__
    int32x2_t dif5 = vpadd_s32( vget_low_s32( dif4 ), vget_high_s32( dif4 ) );
    return vpadd_s32( dif5, dif5 );
#else
    return vdup_n_s32( vaddvq_s32( dif4 ) );
#endif
}

static etcpak_force_inline int16x8_t Planar_NEON_SumWide( uint8x16_t src )
{
    uint16x8_t accu8 = vpaddlq_u8( src );
#ifndef __aarch64__
    uint16x4_t accu4 = vpadd_u16( vget_low_u16( accu8 ), vget_high_u16( accu8 ) );
    uint16x4_t accu2 = vpadd_u16( accu4, accu4 );
    uint16x4_t accu1 = vpadd_u16( accu2, accu2 );
    return vreinterpretq_s16_u16( vcombine_u16( accu1, accu1 ) );
#else
    return vdupq_n_s16( vaddvq_u16( accu8 ) );
#endif
}

static etcpak_force_inline int16x8_t convert6_NEON( int32x4_t lo, int32x4_t hi )
{
    uint16x8_t x = vcombine_u16( vqmovun_s32( lo ), vqmovun_s32( hi ) );
    int16x8_t i = vreinterpretq_s16_u16( vshrq_n_u16( vqshlq_n_u16( x, 6 ), 6) ); // clamp 0-1023
    i = vhsubq_s16( i, vdupq_n_s16( 15 ) );

    int16x8_t ip11 = vaddq_s16( i, vdupq_n_s16( 11 ) );
    int16x8_t ip4 = vaddq_s16( i, vdupq_n_s16( 4 ) );

    return vshrq_n_s16( vsubq_s16( vsubq_s16( ip11, vshrq_n_s16( ip11, 7 ) ), vshrq_n_s16( ip4, 7) ), 3 );
}

static etcpak_force_inline int16x4_t convert7_NEON( int32x4_t x )
{
    int16x4_t i = vreinterpret_s16_u16( vshr_n_u16( vqshl_n_u16( vqmovun_s32( x ), 6 ), 6 ) ); // clamp 0-1023
    i = vhsub_s16( i, vdup_n_s16( 15 ) );

    int16x4_t p9 = vadd_s16( i, vdup_n_s16( 9 ) );
    int16x4_t p6 = vadd_s16( i, vdup_n_s16( 6 ) );
    return vshr_n_s16( vsub_s16( vsub_s16( p9, vshr_n_s16( p9, 8 ) ), vshr_n_s16( p6, 8 ) ), 2 );
}

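// NEON implementation of the ETC2 planar-mode fit: returns the encoded block and the
// weighted error (left at zero when the error calculation is skipped).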
static etcpak_force_inline std::pair<uint64_t, uint64_t> Planar_NEON( const uint8_t* src, const uint8_t mode, bool useHeuristics )
{
    uint8x16x4_t srcBlock = vld4q_u8( src );

    int16x8_t bSumWide = Planar_NEON_SumWide( srcBlock.val[0] );
    int16x8_t gSumWide = Planar_NEON_SumWide( srcBlock.val[1] );
    int16x8_t rSumWide = Planar_NEON_SumWide( srcBlock.val[2] );

    int16x8_t dif_R_lo = vsubq_s16( vreinterpretq_s16_u16( vshll_n_u8( vget_low_u8( srcBlock.val[2] ), 4) ), rSumWide );
    int16x8_t dif_R_hi = vsubq_s16( vreinterpretq_s16_u16( vshll_n_u8( vget_high_u8( srcBlock.val[2] ), 4) ), rSumWide );

    int16x8_t dif_G_lo = vsubq_s16( vreinterpretq_s16_u16( vshll_n_u8( vget_low_u8( srcBlock.val[1] ), 4 ) ), gSumWide );
    int16x8_t dif_G_hi = vsubq_s16( vreinterpretq_s16_u16( vshll_n_u8( vget_high_u8( srcBlock.val[1] ), 4 ) ), gSumWide );

    int16x8_t dif_B_lo = vsubq_s16( vreinterpretq_s16_u16( vshll_n_u8( vget_low_u8( srcBlock.val[0] ), 4) ), bSumWide );
    int16x8_t dif_B_hi = vsubq_s16( vreinterpretq_s16_u16( vshll_n_u8( vget_high_u8( srcBlock.val[0] ), 4) ), bSumWide );

    int32x2x2_t dif_xz_z = vzip_s32( vzip_s32( Planar_NEON_DifXZ( dif_B_lo, dif_B_hi ), Planar_NEON_DifXZ( dif_R_lo, dif_R_hi ) ).val[0], Planar_NEON_DifXZ( dif_G_lo, dif_G_hi ) );
    int32x4_t dif_xz = vcombine_s32( dif_xz_z.val[0], dif_xz_z.val[1] );
    int32x2x2_t dif_yz_z = vzip_s32( vzip_s32( Planar_NEON_DifYZ( dif_B_lo, dif_B_hi ), Planar_NEON_DifYZ( dif_R_lo, dif_R_hi ) ).val[0], Planar_NEON_DifYZ( dif_G_lo, dif_G_hi ) );
    int32x4_t dif_yz = vcombine_s32( dif_yz_z.val[0], dif_yz_z.val[1] );

    const float fscale = -4.0f / ( (255 * 255 * 8.0f + 85 * 85 * 8.0f ) * 16.0f );
    float32x4_t fa = vmulq_n_f32( vcvtq_f32_s32( dif_xz ), fscale );
    float32x4_t fb = vmulq_n_f32( vcvtq_f32_s32( dif_yz ), fscale );
    int16x4_t bgrgSum = vzip_s16( vzip_s16( vget_low_s16( bSumWide ), vget_low_s16( rSumWide ) ).val[0], vget_low_s16( gSumWide ) ).val[0];
    float32x4_t fd = vmulq_n_f32( vcvtq_f32_s32( vmovl_s16( bgrgSum ) ), 4.0f / 16.0f);

    float32x4_t cof = vmlaq_n_f32( vmlaq_n_f32( fd, fb, 255.0f ), fa, 255.0f );
    float32x4_t chf = vmlaq_n_f32( vmlaq_n_f32( fd, fb, 255.0f ), fa, -425.0f );
    float32x4_t cvf = vmlaq_n_f32( vmlaq_n_f32( fd, fb, -425.0f ), fa, 255.0f );

    int32x4_t coi = vcvtq_s32_f32( cof );
    int32x4_t chi = vcvtq_s32_f32( chf );
    int32x4_t cvi = vcvtq_s32_f32( cvf );

    int32x4x2_t tr_hv = vtrnq_s32( chi, cvi );
    int32x4x2_t tr_o = vtrnq_s32( coi, coi );

    int16x8_t c_hvoo_br_6 = convert6_NEON( tr_hv.val[0], tr_o.val[0] );
    int16x4_t c_hvox_g_7 = convert7_NEON( vcombine_s32( vget_low_s32( tr_hv.val[1] ), vget_low_s32( tr_o.val[1] ) ) );
    int16x8_t c_hvoo_br_8 = vorrq_s16( vshrq_n_s16( c_hvoo_br_6, 4 ), vshlq_n_s16( c_hvoo_br_6, 2 ) );
    int16x4_t c_hvox_g_8 = vorr_s16( vshr_n_s16( c_hvox_g_7, 6 ), vshl_n_s16( c_hvox_g_7, 1 ) );

    uint64_t error = 0;
    if( mode != ModePlanar && useHeuristics )
    {
        int16x4_t rec_gxbr_o = vext_s16( c_hvox_g_8, vget_high_s16( c_hvoo_br_8 ), 3 );

        rec_gxbr_o = vadd_s16( vshl_n_s16( rec_gxbr_o, 2 ), vdup_n_s16( 2 ) );
        int16x8_t rec_ro_wide = vdupq_lane_s16( rec_gxbr_o, 3 );
        int16x8_t rec_go_wide = vdupq_lane_s16( rec_gxbr_o, 0 );
        int16x8_t rec_bo_wide = vdupq_lane_s16( rec_gxbr_o, 1 );

        int16x4_t br_hv2 = vsub_s16( vget_low_s16( c_hvoo_br_8 ), vget_high_s16( c_hvoo_br_8 ) );
        int16x4_t gg_hv2 = vsub_s16( c_hvox_g_8, vdup_lane_s16( c_hvox_g_8, 2 ) );

        int16x8_t scaleh_lo = { 0, 0, 0, 0, 1, 1, 1, 1 };
        int16x8_t scaleh_hi = { 2, 2, 2, 2, 3, 3, 3, 3 };
        int16x8_t scalev = { 0, 1, 2, 3, 0, 1, 2, 3 };

        int16x8_t rec_r_1 = vmlaq_lane_s16( rec_ro_wide, scalev, br_hv2, 3 );
        int16x8_t rec_r_lo = vreinterpretq_s16_u16( vmovl_u8( vqshrun_n_s16( vmlaq_lane_s16( rec_r_1, scaleh_lo, br_hv2, 2 ), 2 ) ) );
        int16x8_t rec_r_hi = vreinterpretq_s16_u16( vmovl_u8( vqshrun_n_s16( vmlaq_lane_s16( rec_r_1, scaleh_hi, br_hv2, 2 ), 2 ) ) );

        int16x8_t rec_b_1 = vmlaq_lane_s16( rec_bo_wide, scalev, br_hv2, 1 );
        int16x8_t rec_b_lo = vreinterpretq_s16_u16( vmovl_u8( vqshrun_n_s16( vmlaq_lane_s16( rec_b_1, scaleh_lo, br_hv2, 0 ), 2 ) ) );
        int16x8_t rec_b_hi = vreinterpretq_s16_u16( vmovl_u8( vqshrun_n_s16( vmlaq_lane_s16( rec_b_1, scaleh_hi, br_hv2, 0 ), 2 ) ) );

        int16x8_t rec_g_1 = vmlaq_lane_s16( rec_go_wide, scalev, gg_hv2, 1 );
        int16x8_t rec_g_lo = vreinterpretq_s16_u16( vmovl_u8( vqshrun_n_s16( vmlaq_lane_s16( rec_g_1, scaleh_lo, gg_hv2, 0 ), 2 ) ) );
        int16x8_t rec_g_hi = vreinterpretq_s16_u16( vmovl_u8( vqshrun_n_s16( vmlaq_lane_s16( rec_g_1, scaleh_hi, gg_hv2, 0 ), 2 ) ) );

        int16x8_t dif_r_lo = vsubq_s16( vreinterpretq_s16_u16( vmovl_u8( vget_low_u8( srcBlock.val[2] ) ) ), rec_r_lo );
        int16x8_t dif_r_hi = vsubq_s16( vreinterpretq_s16_u16( vmovl_u8( vget_high_u8( srcBlock.val[2] ) ) ), rec_r_hi );

        int16x8_t dif_g_lo = vsubq_s16( vreinterpretq_s16_u16( vmovl_u8( vget_low_u8( srcBlock.val[1] ) ) ), rec_g_lo );
        int16x8_t dif_g_hi = vsubq_s16( vreinterpretq_s16_u16( vmovl_u8( vget_high_u8( srcBlock.val[1] ) ) ), rec_g_hi );

        int16x8_t dif_b_lo = vsubq_s16( vreinterpretq_s16_u16( vmovl_u8( vget_low_u8( srcBlock.val[0] ) ) ), rec_b_lo );
        int16x8_t dif_b_hi = vsubq_s16( vreinterpretq_s16_u16( vmovl_u8( vget_high_u8( srcBlock.val[0] ) ) ), rec_b_hi );

        int16x8_t dif_lo = vmlaq_n_s16( vmlaq_n_s16( vmulq_n_s16( dif_r_lo, 38 ), dif_g_lo, 76 ), dif_b_lo, 14 );
        int16x8_t dif_hi = vmlaq_n_s16( vmlaq_n_s16( vmulq_n_s16( dif_r_hi, 38 ), dif_g_hi, 76 ), dif_b_hi, 14 );

        int16x4_t tmpDif = vget_low_s16( dif_lo );
        int32x4_t difsq_0 = vmull_s16( tmpDif, tmpDif );
        tmpDif = vget_high_s16( dif_lo );
        int32x4_t difsq_1 = vmull_s16( tmpDif, tmpDif );
        tmpDif = vget_low_s16( dif_hi );
        int32x4_t difsq_2 = vmull_s16( tmpDif, tmpDif );
        tmpDif = vget_high_s16( dif_hi );
        int32x4_t difsq_3 = vmull_s16( tmpDif, tmpDif );

        uint32x4_t difsq_5 = vaddq_u32( vreinterpretq_u32_s32( difsq_0 ), vreinterpretq_u32_s32( difsq_1 ) );
        uint32x4_t difsq_6 = vaddq_u32( vreinterpretq_u32_s32( difsq_2 ), vreinterpretq_u32_s32( difsq_3 ) );

        uint64x2_t difsq_7 = vaddl_u32( vget_low_u32( difsq_5 ), vget_high_u32( difsq_5 ) );
        uint64x2_t difsq_8 = vaddl_u32( vget_low_u32( difsq_6 ), vget_high_u32( difsq_6 ) );

        uint64x2_t difsq_9 = vaddq_u64( difsq_7, difsq_8 );

#ifdef __aarch64__
        error = vaddvq_u64( difsq_9 );
#else
        error = vgetq_lane_u64( difsq_9, 0 ) + vgetq_lane_u64( difsq_9, 1 );
#endif
    }

    int32_t coR = c_hvoo_br_6[6];
    int32_t coG = c_hvox_g_7[2];
    int32_t coB = c_hvoo_br_6[4];

    int32_t chR = c_hvoo_br_6[2];
    int32_t chG = c_hvox_g_7[0];
    int32_t chB = c_hvoo_br_6[0];

    int32_t cvR = c_hvoo_br_6[3];
    int32_t cvG = c_hvox_g_7[1];
    int32_t cvB = c_hvoo_br_6[1];

    uint32_t rgbv = cvB | ( cvG << 6 ) | ( cvR << 13 );
    uint32_t rgbh = chB | ( chG << 6 ) | ( chR << 13 );
    uint32_t hi = rgbv | ( ( rgbh & 0x1FFF ) << 19 );
    uint32_t lo = ( chR & 0x1 ) | 0x2 | ( ( chR << 1 ) & 0x7C );
    lo |= ( ( coB & 0x07 ) << 7 ) | ( ( coB & 0x18 ) << 8 ) | ( ( coB & 0x20 ) << 11 );
    lo |= ( ( coG & 0x3F) << 17) | ( (coG & 0x40 ) << 18 );
    lo |= coR << 25;

    const auto idx = ( coR & 0x20 ) | ( ( coG & 0x20 ) >> 1 ) | ( ( coB & 0x1E ) >> 1 );

    lo |= g_flags[idx];

    uint64_t result = static_cast<uint32_t>( _bswap(lo) );
    result |= static_cast<uint64_t>( static_cast<uint32_t>( _bswap( hi ) ) ) << 32;

    return std::make_pair( result, error );
}

#endif

#ifdef __AVX2__
uint32_t calculateErrorTH( bool tMode, uint8_t( colorsRGB444 )[2][3], uint8_t& dist, uint32_t& pixIndices, uint8_t startDist, __m128i r8, __m128i g8, __m128i b8 )
#else
uint32_t calculateErrorTH( bool tMode, uint8_t* src, uint8_t( colorsRGB444 )[2][3], uint8_t& dist, uint32_t& pixIndices, uint8_t startDist )
#endif
{}


// main T-/H-mode compression function
#ifdef __AVX2__
uint32_t compressBlockTH( uint8_t* src, Luma& l, uint32_t& compressed1, uint32_t& compressed2, bool& tMode, __m128i r8, __m128i g8, __m128i b8 )
#else
uint32_t compressBlockTH( uint8_t *src, Luma& l, uint32_t& compressed1, uint32_t& compressed2, bool &tMode )
#endif
{}
//#endif

template<class T, class S>
static etcpak_force_inline uint64_t EncodeSelectors( uint64_t d, const T terr[2][8], const S tsel[16][8], const uint32_t* id, const uint64_t value, const uint64_t error)
{}

}

static etcpak_force_inline uint64_t ProcessRGB( const uint8_t* src )
{}

#ifdef __AVX2__
// horizontal min/max functions. https://stackoverflow.com/questions/22256525/horizontal-minimum-and-maximum-using-sse
// if an error occurs in GCC, please change the value of -march in CFLAGS to a specific value for your CPU (e.g., skylake).
static inline int16_t hMax( __m128i buffer, uint8_t& idx )
{
    __m128i tmp1 = _mm_sub_epi8( _mm_set1_epi8( (char)( 255 ) ), buffer );
    __m128i tmp2 = _mm_min_epu8( tmp1, _mm_srli_epi16( tmp1, 8 ) );
    __m128i tmp3 = _mm_minpos_epu16( tmp2 );
    uint8_t result = 255 - (uint8_t)_mm_cvtsi128_si32( tmp3 );
    __m128i mask = _mm_cmpeq_epi8( buffer, _mm_set1_epi8( result ) );
    idx = _tzcnt_u32( _mm_movemask_epi8( mask ) );

    return result;
}
#elif defined __ARM_NEON && defined __aarch64__
static inline int16_t hMax( uint8x16_t buffer, uint8_t& idx )
{
    const uint8_t max = vmaxvq_u8( buffer );
    const uint16x8_t vmax = vdupq_n_u16( max );
    uint8x16x2_t buff_wide = vzipq_u8( buffer, uint8x16_t() );
    uint16x8_t lowbuf16 = vreinterpretq_u16_u8( buff_wide.val[0] );
    uint16x8_t hibuf16 = vreinterpretq_u16_u8( buff_wide.val[1] );
    uint16x8_t low_eqmask = vceqq_u16( lowbuf16, vmax );
    uint16x8_t hi_eqmask = vceqq_u16( hibuf16, vmax );

    static const uint16_t mask_lsb[] = {
        0x1, 0x2, 0x4, 0x8,
        0x10, 0x20, 0x40, 0x80 };

    static const uint16_t mask_msb[] = {
        0x100, 0x200, 0x400, 0x800,
        0x1000, 0x2000, 0x4000, 0x8000 };

    uint16x8_t vmask_lsb = vld1q_u16( mask_lsb );
    uint16x8_t vmask_msb = vld1q_u16( mask_msb );
    uint16x8_t pos_lsb = vandq_u16( vmask_lsb, low_eqmask );
    uint16x8_t pos_msb = vandq_u16( vmask_msb, hi_eqmask );
    pos_lsb = vpaddq_u16( pos_lsb, pos_lsb );
    pos_lsb = vpaddq_u16( pos_lsb, pos_lsb );
    pos_lsb = vpaddq_u16( pos_lsb, pos_lsb );
    uint64_t idx_lane1 = vgetq_lane_u64( vreinterpretq_u64_u16( pos_lsb ), 0 );
    pos_msb = vpaddq_u16( pos_msb, pos_msb );
    pos_msb = vpaddq_u16( pos_msb, pos_msb );
    pos_msb = vpaddq_u16( pos_msb, pos_msb );
    uint32_t idx_lane2 = vgetq_lane_u32( vreinterpretq_u32_u16( pos_msb ), 0 );
    idx = idx_lane1 != 0 ? __builtin_ctz( idx_lane1 ) : __builtin_ctz( idx_lane2 );

    return max;
}
#endif

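// Horizontal byte-wise minimum of the 16-byte vector; also returns the index of a
// minimum element through idx.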
#ifdef __AVX2__
static inline int16_t hMin( __m128i buffer, uint8_t& idx )
{
    __m128i tmp2 = _mm_min_epu8( buffer, _mm_srli_epi16( buffer, 8 ) );
    __m128i tmp3 = _mm_minpos_epu16( tmp2 );
    uint8_t result = (uint8_t)_mm_cvtsi128_si32( tmp3 );
    __m128i mask = _mm_cmpeq_epi8( buffer, _mm_set1_epi8( result ) );
    idx = _tzcnt_u32( _mm_movemask_epi8( mask ) );
    return result;
}
#elif defined __ARM_NEON && defined __aarch64__
static inline int16_t hMin( uint8x16_t buffer, uint8_t& idx )
{
    const uint8_t min = vminvq_u8( buffer );
    const uint16x8_t vmin = vdupq_n_u16( min );
    uint8x16x2_t buff_wide = vzipq_u8( buffer, uint8x16_t() );
    uint16x8_t lowbuf16 = vreinterpretq_u16_u8( buff_wide.val[0] );
    uint16x8_t hibuf16 = vreinterpretq_u16_u8( buff_wide.val[1] );
    uint16x8_t low_eqmask = vceqq_u16( lowbuf16, vmin );
    uint16x8_t hi_eqmask = vceqq_u16( hibuf16, vmin );

    static const uint16_t mask_lsb[] = {
        0x1, 0x2, 0x4, 0x8,
        0x10, 0x20, 0x40, 0x80 };

    static const uint16_t mask_msb[] = {
        0x100, 0x200, 0x400, 0x800,
        0x1000, 0x2000, 0x4000, 0x8000 };

    uint16x8_t vmask_lsb = vld1q_u16( mask_lsb );
    uint16x8_t vmask_msb = vld1q_u16( mask_msb );
    uint16x8_t pos_lsb = vandq_u16( vmask_lsb, low_eqmask );
    uint16x8_t pos_msb = vandq_u16( vmask_msb, hi_eqmask );
    pos_lsb = vpaddq_u16( pos_lsb, pos_lsb );
    pos_lsb = vpaddq_u16( pos_lsb, pos_lsb );
    pos_lsb = vpaddq_u16( pos_lsb, pos_lsb );
    uint64_t idx_lane1 = vgetq_lane_u64( vreinterpretq_u64_u16( pos_lsb ), 0 );
    pos_msb = vpaddq_u16( pos_msb, pos_msb );
    pos_msb = vpaddq_u16( pos_msb, pos_msb );
    pos_msb = vpaddq_u16( pos_msb, pos_msb );
    uint32_t idx_lane2 = vgetq_lane_u32( vreinterpretq_u32_u16( pos_msb ), 0 );
    idx = idx_lane1 != 0 ? __builtin_ctz( idx_lane1 ) : __builtin_ctz( idx_lane2 );

    return min;
}
#endif

// During the search it is not convenient to store the bits in the order used by the
// file format. Hence, after the search, they are converted to that format here.
// NO WARRANTY --- SEE STATEMENT IN TOP OF FILE (C) Ericsson AB 2005-2013. All Rights Reserved.
static inline void stuff59bits( unsigned int thumbT59W1, unsigned int thumbT59W2, unsigned int& thumbTW1, unsigned int& thumbTW2 )
{}

// During the search it is not convenient to store the bits in the order used by the
// file format. Hence, after the search, they are converted to that format here.
// NO WARRANTY --- SEE STATEMENT IN TOP OF FILE (C) Ericsson AB 2005-2013. All Rights Reserved.
static inline void stuff58bits( unsigned int thumbH58W1, unsigned int thumbH58W2, unsigned int& thumbHW1, unsigned int& thumbHW2 )
{}

#if defined __AVX2__ || (defined __ARM_NEON && defined __aarch64__)
static etcpak_force_inline Channels GetChannels( const uint8_t* src )
{
    Channels ch;
#ifdef __AVX2__
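    // De-interleave the 4x4 block of 32-bit pixels into three 16-byte channel planes:
    // each row is shuffled so its four channel-0 bytes come first, then channel-1, then
    // channel-2, with the alpha slots zeroed; the 32- and 64-bit unpacks below then gather
    // each channel's bytes for all 16 pixels.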
    __m128i d0 = _mm_loadu_si128( ( (__m128i*)src ) + 0 );
    __m128i d1 = _mm_loadu_si128( ( (__m128i*)src ) + 1 );
    __m128i d2 = _mm_loadu_si128( ( (__m128i*)src ) + 2 );
    __m128i d3 = _mm_loadu_si128( ( (__m128i*)src ) + 3 );

    __m128i rgb0 = _mm_shuffle_epi8( d0, _mm_setr_epi8( 0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, -1, -1, -1, -1 ) );
    __m128i rgb1 = _mm_shuffle_epi8( d1, _mm_setr_epi8( 0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, -1, -1, -1, -1 ) );
    __m128i rgb2 = _mm_shuffle_epi8( d2, _mm_setr_epi8( 0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, -1, -1, -1, -1 ) );
    __m128i rgb3 = _mm_shuffle_epi8( d3, _mm_setr_epi8( 0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, -1, -1, -1, -1 ) );

    __m128i rg0 = _mm_unpacklo_epi32( rgb0, rgb1 );
    __m128i rg1 = _mm_unpacklo_epi32( rgb2, rgb3 );
    __m128i b0 = _mm_unpackhi_epi32( rgb0, rgb1 );
    __m128i b1 = _mm_unpackhi_epi32( rgb2, rgb3 );

    // swap channels: the unpack order leaves the first source channel in b8 and the third in r8 (g8 is unaffected)
    ch.b8 = _mm_unpacklo_epi64( rg0, rg1 );
    ch.g8 = _mm_unpackhi_epi64( rg0, rg1 );
    ch.r8 = _mm_unpacklo_epi64( b0, b1 );
#elif defined __ARM_NEON && defined __aarch64__
    // load pixel data into 4 rows
    uint8x16_t px0 = vld1q_u8( src + 0 );
    uint8x16_t px1 = vld1q_u8( src + 16 );
    uint8x16_t px2 = vld1q_u8( src + 32 );
    uint8x16_t px3 = vld1q_u8( src + 48 );

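    // Three levels of byte zips transpose the rows so that rgb01.val[0] holds the first-
    // and second-channel bytes of pixels 0-7 and rgb01.val[1] the third-channel and alpha
    // bytes (likewise rgb23 for pixels 8-15); the 64-bit zips below then assemble the full
    // 16-byte r/g/b planes, which are finally widened to 16 bits by zipping with zero.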
    uint8x16x2_t px0z1 = vzipq_u8( px0, px1 );
    uint8x16x2_t px2z3 = vzipq_u8( px2, px3 );
    uint8x16x2_t px01 = vzipq_u8( px0z1.val[0], px0z1.val[1] );
    uint8x16x2_t rgb01 = vzipq_u8( px01.val[0], px01.val[1] );
    uint8x16x2_t px23 = vzipq_u8( px2z3.val[0], px2z3.val[1] );
    uint8x16x2_t rgb23 = vzipq_u8( px23.val[0], px23.val[1] );

    uint8x16_t rr = vreinterpretq_u8_u64( vzip1q_u64( vreinterpretq_u64_u8( rgb01.val[0] ), vreinterpretq_u64_u8( rgb23.val[0] ) ) );
    uint8x16_t gg = vreinterpretq_u8_u64( vzip2q_u64( vreinterpretq_u64_u8( rgb01.val[0] ), vreinterpretq_u64_u8( rgb23.val[0] ) ) );
    uint8x16_t bb = vreinterpretq_u8_u64( vzip1q_u64( vreinterpretq_u64_u8( rgb01.val[1] ), vreinterpretq_u64_u8( rgb23.val[1] ) ) );

    uint8x16x2_t red = vzipq_u8( rr, uint8x16_t() );
    uint8x16x2_t grn = vzipq_u8( gg, uint8x16_t() );
    uint8x16x2_t blu = vzipq_u8( bb, uint8x16_t() );
    ch.r = red;
    ch.b = blu;
    ch.g = grn;
#endif
    return ch;
}
#endif

#if defined __AVX2__ || (defined __ARM_NEON && defined __aarch64__)
static etcpak_force_inline void CalculateLuma( Channels& ch, Luma& luma )
#else
static etcpak_force_inline void CalculateLuma( const uint8_t* src, Luma& luma )
#endif
{}

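// Early compression-mode decision: based on the block's luma statistics, selects
// ModePlanar, ModeTH, or ModeUndecided (no early decision) for the encoder below.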
static etcpak_force_inline uint8_t SelectModeETC2( const Luma& luma )
{}

static etcpak_force_inline uint64_t ProcessRGB_ETC2( const uint8_t* src, bool useHeuristics )
{}

#ifdef __SSE4_1__
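// Widen<K> broadcasts the K-th 16-bit lane of src to all eight lanes: lanes 0-3 are
// reached with _mm_shufflelo_epi16, lanes 4-7 with _mm_shufflehi_epi16, followed by a
// 32-bit shuffle that replicates the selected pair across the register.
// e.g. Widen<5>( {a0,a1,a2,a3,a4,a5,a6,a7} ) == {a5,a5,a5,a5,a5,a5,a5,a5}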
template<int K>
static etcpak_force_inline __m128i Widen( const __m128i src )
{
    static_assert( K >= 0 && K <= 7, "Index out of range" );

    __m128i tmp;
    switch( K )
    {
    case 0:
        tmp = _mm_shufflelo_epi16( src, _MM_SHUFFLE( 0, 0, 0, 0 ) );
        return _mm_shuffle_epi32( tmp, _MM_SHUFFLE( 0, 0, 0, 0 ) );
    case 1:
        tmp = _mm_shufflelo_epi16( src, _MM_SHUFFLE( 1, 1, 1, 1 ) );
        return _mm_shuffle_epi32( tmp, _MM_SHUFFLE( 0, 0, 0, 0 ) );
    case 2:
        tmp = _mm_shufflelo_epi16( src, _MM_SHUFFLE( 2, 2, 2, 2 ) );
        return _mm_shuffle_epi32( tmp, _MM_SHUFFLE( 0, 0, 0, 0 ) );
    case 3:
        tmp = _mm_shufflelo_epi16( src, _MM_SHUFFLE( 3, 3, 3, 3 ) );
        return _mm_shuffle_epi32( tmp, _MM_SHUFFLE( 0, 0, 0, 0 ) );
    case 4:
        tmp = _mm_shufflehi_epi16( src, _MM_SHUFFLE( 0, 0, 0, 0 ) );
        return _mm_shuffle_epi32( tmp, _MM_SHUFFLE( 2, 2, 2, 2 ) );
    case 5:
        tmp = _mm_shufflehi_epi16( src, _MM_SHUFFLE( 1, 1, 1, 1 ) );
        return _mm_shuffle_epi32( tmp, _MM_SHUFFLE( 2, 2, 2, 2 ) );
    case 6:
        tmp = _mm_shufflehi_epi16( src, _MM_SHUFFLE( 2, 2, 2, 2 ) );
        return _mm_shuffle_epi32( tmp, _MM_SHUFFLE( 2, 2, 2, 2 ) );
    case 7:
        tmp = _mm_shufflehi_epi16( src, _MM_SHUFFLE( 3, 3, 3, 3 ) );
        return _mm_shuffle_epi32( tmp, _MM_SHUFFLE( 2, 2, 2, 2 ) );
    }
}

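// Maps a candidate index (0-15) to one of six multiplier lanes (0-5); the NEON path
// below expresses the same mapping as a constexpr ternary chain.
// sel:  0 | 1-3 | 4 | 5-7 | 8-13 | 14-15
// lane: 0 |  1  | 2 |  3  |  4   |  5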
static etcpak_force_inline int GetMulSel( int sel )
{
    switch( sel )
    {
    case 0:
        return 0;
    case 1:
    case 2:
    case 3:
        return 1;
    case 4:
        return 2;
    case 5:
    case 6:
    case 7:
        return 3;
    case 8:
    case 9:
    case 10:
    case 11:
    case 12:
    case 13:
        return 4;
    case 14:
    case 15:
        return 5;
    }
}

#endif

#ifdef __ARM_NEON

static constexpr etcpak_force_inline int GetMulSel(int sel)
{
    return ( sel < 1 ) ? 0 : ( sel < 4 ) ? 1 : ( sel < 5 ) ? 2 : ( sel < 8 ) ? 3 : ( sel < 14 ) ? 4 : 5;
}

static constexpr int ClampConstant( int x, int min, int max )
{
    return x < min ? min : x > max ? max : x;
}

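// Broadcasts the Index-th alpha sample of the block, takes the absolute difference
// against the eight candidate reconstructions in recVal, and widens it to squared
// 16-bit errors (vmull of the delta with itself).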
template <int Index>
etcpak_force_inline static uint16x8_t ErrorProbe_EAC_NEON( uint8x8_t recVal, uint8x16_t alphaBlock )
{
    uint8x8_t srcValWide;
#ifndef __aarch64__
    if( Index < 8 )
        srcValWide = vdup_lane_u8( vget_low_u8( alphaBlock ), ClampConstant( Index, 0, 7 ) );
    else
        srcValWide = vdup_lane_u8( vget_high_u8( alphaBlock ), ClampConstant( Index - 8, 0, 7 ) );
#else
    srcValWide = vdup_laneq_u8( alphaBlock, Index );
#endif

    uint8x8_t deltaVal = vabd_u8( srcValWide, recVal );
    return vmull_u8( deltaVal, deltaVal );
}

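// Horizontal minimum of the eight 16-bit error values: pairwise vpmin steps on ARMv7,
// a single vminvq_u16 on AArch64.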
etcpak_force_inline static uint16_t MinError_EAC_NEON( uint16x8_t errProbe )
{
#ifndef __aarch64__
    uint16x4_t tmpErr = vpmin_u16( vget_low_u16( errProbe ), vget_high_u16( errProbe ) );
    tmpErr = vpmin_u16( tmpErr, tmpErr );
    return vpmin_u16( tmpErr, tmpErr )[0];
#else
    return vminvq_u16( errProbe );
#endif
}

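// Finds which of the eight candidates produced the smallest error for pixel Index and
// returns its 3-bit selector already shifted into place: vqmovn narrows the 16-bit
// equality mask to bytes, the trailing-zero count divided by 8 turns the first matching
// byte into a lane number, and the shift by 45 - Index * 3 positions it within the
// 48-bit selector field.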
template <int Index>
etcpak_force_inline static uint64_t MinErrorIndex_EAC_NEON( uint8x8_t recVal, uint8x16_t alphaBlock )
{
    uint16x8_t errProbe = ErrorProbe_EAC_NEON<Index>( recVal, alphaBlock );
    uint16x8_t minErrMask = vceqq_u16( errProbe, vdupq_n_u16( MinError_EAC_NEON( errProbe ) ) );
    uint64_t idx = __builtin_ctzll( vget_lane_u64( vreinterpret_u64_u8( vqmovn_u16( minErrMask ) ), 0 ) );
    idx >>= 3;
    idx <<= 45 - Index * 3;

    return idx;
}

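// Broadcasts the multiplier lane selected by GetMulSel( Index ) across the vector.
// On ARMv7, vdupq_lane_s16 needs a compile-time lane within a half-register, so the
// untaken branch is kept valid with ClampConstant; AArch64 can index the full register
// with vdupq_laneq_s16.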
template <int Index>
etcpak_force_inline static int16x8_t WidenMultiplier_EAC_NEON( int16x8_t multipliers )
{
    constexpr int Lane = GetMulSel( Index );
#ifndef __aarch64__
    if( Lane < 4 )
        return vdupq_lane_s16( vget_low_s16( multipliers ), ClampConstant( Lane, 0, 3 ) );
    else
        return vdupq_lane_s16( vget_high_s16( multipliers ), ClampConstant( Lane - 4, 0, 3 ) );
#else
    return vdupq_laneq_s16( multipliers, Lane );
#endif
}

#endif

template<bool checkSolid = true>
static etcpak_force_inline uint64_t ProcessAlpha_ETC2( const uint8_t* src )
{}

void CompressEtc1Alpha( const uint32_t* src, uint64_t* dst, uint32_t blocks, size_t width )
{}

void CompressEtc2Alpha( const uint32_t* src, uint64_t* dst, uint32_t blocks, size_t width, bool useHeuristics )
{}

#include <chrono>
#include <thread>

void CompressEtc1Rgb( const uint32_t* src, uint64_t* dst, uint32_t blocks, size_t width )
{}

void CompressEtc1RgbDither( const uint32_t* src, uint64_t* dst, uint32_t blocks, size_t width )
{}

void CompressEtc2Rgb( const uint32_t* src, uint64_t* dst, uint32_t blocks, size_t width, bool useHeuristics )
{}

void CompressEtc2Rgba( const uint32_t* src, uint64_t* dst, uint32_t blocks, size_t width, bool useHeuristics )
{}

void CompressEacR( const uint32_t* src, uint64_t* dst, uint32_t blocks, size_t width )
{}

void CompressEacRg( const uint32_t* src, uint64_t* dst, uint32_t blocks, size_t width )
{}