chromium/third_party/zstd/src/lib/compress/zstd_lazy.c

/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#include "zstd_compress_internal.h"
#include "zstd_lazy.h"
#include "../common/bits.h" /* ZSTD_countTrailingZeros64 */

#if !defined(ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR) \
 || !defined(ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR) \
 || !defined(ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR) \
 || !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR)

#define kLazySkippingStep


/*-*************************************
*  Binary Tree search
***************************************/

static
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
void ZSTD_updateDUBT(ZSTD_matchState_t* ms,
                const BYTE* ip, const BYTE* iend,
                U32 mls)
{}


/** ZSTD_insertDUBT1() :
 *  sort one already inserted but unsorted position
 *  assumption : curr >= btlow == (curr - btmask)
 *  doesn't fail */
static
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
void ZSTD_insertDUBT1(const ZSTD_matchState_t* ms,
                 U32 curr, const BYTE* inputEnd,
                 U32 nbCompares, U32 btLow,
                 const ZSTD_dictMode_e dictMode)
{}


static
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_DUBT_findBetterDictMatch (
        const ZSTD_matchState_t* ms,
        const BYTE* const ip, const BYTE* const iend,
        size_t* offsetPtr,
        size_t bestLength,
        U32 nbCompares,
        U32 const mls,
        const ZSTD_dictMode_e dictMode)
{}


static
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
                        const BYTE* const ip, const BYTE* const iend,
                        size_t* offBasePtr,
                        U32 const mls,
                        const ZSTD_dictMode_e dictMode)
{}


/** ZSTD_BtFindBestMatch() : Tree updater, providing best match */
FORCE_INLINE_TEMPLATE
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_BtFindBestMatch( ZSTD_matchState_t* ms,
                const BYTE* const ip, const BYTE* const iLimit,
                      size_t* offBasePtr,
                const U32 mls /* template */,
                const ZSTD_dictMode_e dictMode)
{}

/***********************************
* Dedicated dict search
***********************************/

void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const BYTE* const ip)
{}

/* Returns the longest match length found in the dedicated dict search structure.
 * If none are longer than the argument ml, then ml will be returned.
 */
FORCE_INLINE_TEMPLATE
size_t ZSTD_dedicatedDictSearch_lazy_search(size_t* offsetPtr, size_t ml, U32 nbAttempts,
                                            const ZSTD_matchState_t* const dms,
                                            const BYTE* const ip, const BYTE* const iLimit,
                                            const BYTE* const prefixStart, const U32 curr,
                                            const U32 dictLimit, const size_t ddsIdx) {}


/* *********************************
*  Hash Chain
***********************************/
#define NEXT_IN_CHAIN(d, mask)

/* Update chains up to ip (excluded)
   Assumption : always within prefix (i.e. not within extDict) */
FORCE_INLINE_TEMPLATE
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
U32 ZSTD_insertAndFindFirstIndex_internal(
                        ZSTD_matchState_t* ms,
                        const ZSTD_compressionParameters* const cParams,
                        const BYTE* ip, U32 const mls, U32 const lazySkipping)
{}
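
/* A minimal sketch of the hash-chain update described above (names and table
 * accesses are illustrative; the real code goes through the elided
 * NEXT_IN_CHAIN() macro and the match state's cParams): each new position
 * becomes the head of its hash bucket's chain, and the chain table remembers
 * the previous head so older candidates stay reachable. */
MEM_STATIC void ZSTD_hashChainInsert_sketch(U32* hashTable, U32* chainTable,
                                            U32 chainMask, size_t h, U32 idx)
{
    chainTable[idx & chainMask] = hashTable[h]; /* link new position to the previous head */
    hashTable[h] = idx;                         /* new position becomes the chain head */
}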

U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip) {}

/* inlining is important to hardwire a hot branch (template emulation) */
FORCE_INLINE_TEMPLATE
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_HcFindBestMatch(
                        ZSTD_matchState_t* ms,
                        const BYTE* const ip, const BYTE* const iLimit,
                        size_t* offsetPtr,
                        const U32 mls, const ZSTD_dictMode_e dictMode)
{}

/* *********************************
* (SIMD) Row-based matchfinder
***********************************/
/* Constants for row-based hash */
#define ZSTD_ROW_HASH_TAG_MASK
#define ZSTD_ROW_HASH_MAX_ENTRIES

#define ZSTD_ROW_HASH_CACHE_MASK

typedef U64 ZSTD_VecMask;   /* Clarifies when we are interacting with a U64 representing a mask of matches */

/* ZSTD_VecMask_next():
 * Starting from the LSB, returns the index of the next non-zero bit,
 * i.e. counts the number of trailing zeroes.
 */
MEM_STATIC U32 ZSTD_VecMask_next(ZSTD_VecMask val) {}
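
/* Hedged sketch of the operation described above, using a portable bit loop
 * for illustration (the real function defers to ZSTD_countTrailingZeros64()
 * from ../common/bits.h, which uses hardware intrinsics when available): */
MEM_STATIC U32 ZSTD_VecMask_next_sketch(ZSTD_VecMask val)
{
    U32 n = 0;
    assert(val != 0);            /* at least one match bit must be set */
    while ((val & 1) == 0) {     /* count trailing zeroes from the LSB */
        val >>= 1;
        n++;
    }
    return n;
}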

/* ZSTD_row_nextIndex():
 * Returns the next index to insert at within a tagTable row, and updates the "head"
 * value to reflect the update. Essentially cycles backwards through the range [1, entriesPerRow).
 */
FORCE_INLINE_TEMPLATE U32 ZSTD_row_nextIndex(BYTE* const tagRow, U32 const rowMask) {}
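
/* Hedged sketch of the circular "head" update described above: the head byte
 * stored at tagRow[0] walks backwards through [1, rowEntries), skipping slot 0
 * which holds the head itself. Not necessarily the exact library code. */
MEM_STATIC U32 ZSTD_row_nextIndex_sketch(BYTE* const tagRow, U32 const rowMask)
{
    U32 next = (*tagRow - 1) & rowMask;
    next += (next == 0) ? rowMask : 0;  /* never land on slot 0 (the head byte) */
    *tagRow = (BYTE)next;
    return next;
}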

/* ZSTD_isAligned():
 * Checks that a pointer is aligned to "align" bytes which must be a power of 2.
 */
MEM_STATIC int ZSTD_isAligned(void const* ptr, size_t align) {}
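
/* Hedged sketch of the alignment check above: with a power-of-2 "align", a
 * pointer is aligned iff its low bits are all zero. */
MEM_STATIC int ZSTD_isAligned_sketch(void const* ptr, size_t align)
{
    assert((align & (align - 1)) == 0);   /* align must be a power of 2 */
    return (((size_t)ptr) & (align - 1)) == 0;
}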

/* ZSTD_row_prefetch():
 * Performs prefetching for the hashTable and tagTable at a given row.
 */
FORCE_INLINE_TEMPLATE void ZSTD_row_prefetch(U32 const* hashTable, BYTE const* tagTable, U32 const relRow, U32 const rowLog) {}
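
/* Hedged sketch of the prefetching described above, assuming PREFETCH_L1()
 * from ../common/compiler.h: bring the row's hash entries and tag bytes into
 * L1 before the row is actually searched. The library's version prefetches
 * additional cache lines depending on rowLog. */
MEM_STATIC void ZSTD_row_prefetch_sketch(U32 const* hashTable, BYTE const* tagTable, U32 const relRow)
{
    PREFETCH_L1(hashTable + relRow);  /* the row's U32 hash entries */
    PREFETCH_L1(tagTable + relRow);   /* the row's 1-byte tags (head + tags) */
}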

/* ZSTD_row_fillHashCache():
 * Fill up the hash cache starting at idx, prefetching up to ZSTD_ROW_HASH_CACHE_SIZE entries,
 * but not beyond iLimit.
 */
FORCE_INLINE_TEMPLATE
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
void ZSTD_row_fillHashCache(ZSTD_matchState_t* ms, const BYTE* base,
                                   U32 const rowLog, U32 const mls,
                                   U32 idx, const BYTE* const iLimit)
{}

/* ZSTD_row_nextCachedHash():
 * Returns the hash of base + idx, and replaces the hash in the hash cache with the byte at
 * base + idx + ZSTD_ROW_HASH_CACHE_SIZE. Also prefetches the appropriate rows from hashTable and tagTable.
 */
FORCE_INLINE_TEMPLATE
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
U32 ZSTD_row_nextCachedHash(U32* cache, U32 const* hashTable,
                                                  BYTE const* tagTable, BYTE const* base,
                                                  U32 idx, U32 const hashLog,
                                                  U32 const rowLog, U32 const mls,
                                                  U64 const hashSalt)
{}

/* ZSTD_row_update_internalImpl():
 * Updates the hash table with positions starting from updateStartIdx until updateEndIdx.
 */
FORCE_INLINE_TEMPLATE
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
void ZSTD_row_update_internalImpl(ZSTD_matchState_t* ms,
                                  U32 updateStartIdx, U32 const updateEndIdx,
                                  U32 const mls, U32 const rowLog,
                                  U32 const rowMask, U32 const useCache)
{}

/* ZSTD_row_update_internal():
 * Inserts the byte at ip into the appropriate position in the hash table, and updates ms->nextToUpdate.
 * Skips sections of long matches as necessary.
 */
FORCE_INLINE_TEMPLATE
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
void ZSTD_row_update_internal(ZSTD_matchState_t* ms, const BYTE* ip,
                              U32 const mls, U32 const rowLog,
                              U32 const rowMask, U32 const useCache)
{}

/* ZSTD_row_update():
 * External wrapper for ZSTD_row_update_internal(). Used for filling the hashtable during dictionary
 * processing.
 */
void ZSTD_row_update(ZSTD_matchState_t* const ms, const BYTE* ip) {}

/* Returns the width of the bit groups that will be set to 1 in the match mask.
 * Since not all architectures have an easy movemask instruction, grouping the
 * bits makes it easier and faster to iterate over the matches.
 */
FORCE_INLINE_TEMPLATE U32
ZSTD_row_matchMaskGroupWidth(const U32 rowEntries)
{}
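
/* Hedged sketch of the group widths implied by the NEON code below: with 16
 * entries each match occupies a 4-bit group (mask 0x8888...), with 32 entries
 * a 2-bit group (mask 0x5555...), and with 64 entries (and on the SSE2/scalar
 * paths) each entry maps to a single bit. Illustrative only; the library's
 * version additionally accounts for byte order on the NEON path. */
MEM_STATIC U32 ZSTD_row_matchMaskGroupWidth_sketch(const U32 rowEntries)
{
    assert((rowEntries == 16) || (rowEntries == 32) || (rowEntries == 64));
#if defined(ZSTD_ARCH_ARM_NEON)
    if (rowEntries == 16) return 4;
    if (rowEntries == 32) return 2;
#endif
    (void)rowEntries;
    return 1;
}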

#if defined(ZSTD_ARCH_X86_SSE2)
FORCE_INLINE_TEMPLATE ZSTD_VecMask
ZSTD_row_getSSEMask(int nbChunks, const BYTE* const src, const BYTE tag, const U32 head)
{}
#endif

#if defined(ZSTD_ARCH_ARM_NEON)
FORCE_INLINE_TEMPLATE ZSTD_VecMask
ZSTD_row_getNEONMask(const U32 rowEntries, const BYTE* const src, const BYTE tag, const U32 headGrouped)
{
    assert((rowEntries == 16) || (rowEntries == 32) || rowEntries == 64);
    if (rowEntries == 16) {
        /* vshrn_n_u16 shifts by 4 every u16 and narrows to 8 lower bits.
         * After that groups of 4 bits represent the equalMask. We lower
         * all bits except the highest in these groups by doing AND with
         * 0x88 = 0b10001000.
         */
        const uint8x16_t chunk = vld1q_u8(src);
        const uint16x8_t equalMask = vreinterpretq_u16_u8(vceqq_u8(chunk, vdupq_n_u8(tag)));
        const uint8x8_t res = vshrn_n_u16(equalMask, 4);
        const U64 matches = vget_lane_u64(vreinterpret_u64_u8(res), 0);
        return ZSTD_rotateRight_U64(matches, headGrouped) & 0x8888888888888888ull;
    } else if (rowEntries == 32) {
        /* Same idea as with rowEntries == 16 but doing AND with
         * 0x55 = 0b01010101.
         */
        const uint16x8x2_t chunk = vld2q_u16((const uint16_t*)(const void*)src);
        const uint8x16_t chunk0 = vreinterpretq_u8_u16(chunk.val[0]);
        const uint8x16_t chunk1 = vreinterpretq_u8_u16(chunk.val[1]);
        const uint8x16_t dup = vdupq_n_u8(tag);
        const uint8x8_t t0 = vshrn_n_u16(vreinterpretq_u16_u8(vceqq_u8(chunk0, dup)), 6);
        const uint8x8_t t1 = vshrn_n_u16(vreinterpretq_u16_u8(vceqq_u8(chunk1, dup)), 6);
        const uint8x8_t res = vsli_n_u8(t0, t1, 4);
        const U64 matches = vget_lane_u64(vreinterpret_u64_u8(res), 0) ;
        return ZSTD_rotateRight_U64(matches, headGrouped) & 0x5555555555555555ull;
    } else { /* rowEntries == 64 */
        const uint8x16x4_t chunk = vld4q_u8(src);
        const uint8x16_t dup = vdupq_n_u8(tag);
        const uint8x16_t cmp0 = vceqq_u8(chunk.val[0], dup);
        const uint8x16_t cmp1 = vceqq_u8(chunk.val[1], dup);
        const uint8x16_t cmp2 = vceqq_u8(chunk.val[2], dup);
        const uint8x16_t cmp3 = vceqq_u8(chunk.val[3], dup);

        const uint8x16_t t0 = vsriq_n_u8(cmp1, cmp0, 1);
        const uint8x16_t t1 = vsriq_n_u8(cmp3, cmp2, 1);
        const uint8x16_t t2 = vsriq_n_u8(t1, t0, 2);
        const uint8x16_t t3 = vsriq_n_u8(t2, t2, 4);
        const uint8x8_t t4 = vshrn_n_u16(vreinterpretq_u16_u8(t3), 4);
        const U64 matches = vget_lane_u64(vreinterpret_u64_u8(t4), 0);
        return ZSTD_rotateRight_U64(matches, headGrouped);
    }
}
#endif

/* Returns a ZSTD_VecMask (U64) that has the nth group (determined by
 * ZSTD_row_matchMaskGroupWidth) of bits set to 1 if the newly-computed "tag"
 * matches the hash at the nth position in a row of the tagTable.
 * Each row is a circular buffer beginning at the value of "headGrouped". So we
 * must rotate the "matches" bitfield to match up with the actual layout of the
 * entries within the hashTable */
FORCE_INLINE_TEMPLATE ZSTD_VecMask
ZSTD_row_getMatchMask(const BYTE* const tagRow, const BYTE tag, const U32 headGrouped, const U32 rowEntries)
{}
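
/* A minimal scalar sketch of the contract described above (group width 1 and
 * power-of-2 rowEntries assumed; the library's version uses SSE2/NEON or a
 * SWAR fallback plus a rotate instead of this per-entry loop): bit i of the
 * result is set when the entry i positions after the head carries the probe tag. */
MEM_STATIC ZSTD_VecMask
ZSTD_row_getMatchMask_scalarSketch(const BYTE* const tagRow, const BYTE tag, const U32 head, const U32 rowEntries)
{
    ZSTD_VecMask matches = 0;
    U32 i;
    assert((rowEntries & (rowEntries - 1)) == 0);   /* rowEntries must be a power of 2 */
    for (i = 0; i < rowEntries; ++i) {
        matches |= (ZSTD_VecMask)(tagRow[(head + i) & (rowEntries - 1)] == tag) << i;
    }
    return matches;
}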

/* The high-level approach of the SIMD row based match finder is as follows:
 * - Figure out where to insert the new entry:
 *      - Generate a hash for the current input position and split it into one byte of tag and `rowHashLog` bits of index.
 *           - The hash is salted by a value that changes on every context reset, so when the same table is used
 *             we will avoid collisions that would otherwise slow us down by introducing phantom matches.
 *      - The hashTable is effectively split into groups or "rows" of 15 or 31 entries of U32, and the index determines
 *        which row to insert into.
 *      - Determine the correct position within the row to insert the entry into. Each row of 15 or 31 can
 *        be considered as a circular buffer with a "head" index that resides in the tagTable (overall 16 or 32 bytes
 *        per row).
 * - Use SIMD to efficiently compare the tags in the tagTable to the 1-byte tag calculated for the position and
 *   generate a bitfield that we can cycle through to check the collisions in the hash table.
 * - Pick the longest match.
 * - Insert the tag into the equivalent row and position in the tagTable.
 */
FORCE_INLINE_TEMPLATE
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_RowFindBestMatch(
                        ZSTD_matchState_t* ms,
                        const BYTE* const ip, const BYTE* const iLimit,
                        size_t* offsetPtr,
                        const U32 mls, const ZSTD_dictMode_e dictMode,
                        const U32 rowLog)
{}
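
/* Hedged sketch of the tag/index split from the overview above, assuming an
 * 8-bit tag (an assumption here, since ZSTD_ROW_HASH_TAG_MASK is not expanded
 * above); the helper name and exact bit layout are illustrative, not the
 * library's: */
MEM_STATIC void ZSTD_row_splitHash_sketch(U64 const hash, U32 const rowHashLog,
                                          U32* const rowIndex, BYTE* const tag)
{
    *tag = (BYTE)(hash & 0xFF);                                    /* low byte: 1-byte tag stored in the tagTable */
    *rowIndex = (U32)((hash >> 8) & (((U32)1 << rowHashLog) - 1)); /* next rowHashLog bits: which row of the hashTable */
}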


/**
 * Generate search functions templated on (dictMode, mls, rowLog).
 * These functions are outlined for code size & compilation time.
 * ZSTD_searchMax() dispatches to the correct implementation function.
 *
 * TODO: The start of the search function involves loading and calculating a
 * bunch of constants from the ZSTD_matchState_t. These computations could be
 * done in an initialization function, and saved somewhere in the match state.
 * Then we could pass a pointer to the saved state instead of the match state,
 * and avoid duplicate computations.
 *
 * TODO: Move the match re-winding into searchMax. This improves compression
 * ratio, and unlocks further simplifications with the next TODO.
 *
 * TODO: Try moving the repcode search into searchMax. After the re-winding
 * and repcode search are in searchMax, there is no more logic in the match
 * finder loop that requires knowledge about the dictMode. So we should be
 * able to avoid force inlining it, and we can join the extDict loop with
 * the single segment loop. It should go in searchMax instead of its own
 * function to avoid having multiple virtual function calls per search.
 */
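
/* Hedged illustration of the "outlined template instantiation" technique
 * described above. The macro and the generated name below are examples, not
 * the library's exact GEN_ZSTD_HC_SEARCH_FN expansion: each (dictMode, mls)
 * pair gets its own outlined wrapper around the force-inlined template, so the
 * compiler specializes the hot code once per combination without inlining it
 * at every call site. */
#define EXAMPLE_GEN_HC_SEARCH_FN(dictMode, mls)                                       \
    static size_t ZSTD_HcFindBestMatch_example_##dictMode##_##mls(                    \
            ZSTD_matchState_t* ms,                                                    \
            const BYTE* ip, const BYTE* const iLimit,                                 \
            size_t* offsetPtr)                                                        \
    {                                                                                 \
        return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, mls, ZSTD_##dictMode); \
    }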

#define ZSTD_BT_SEARCH_FN(dictMode, mls)
#define ZSTD_HC_SEARCH_FN(dictMode, mls)
#define ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog)

#define ZSTD_SEARCH_FN_ATTRS

#define GEN_ZSTD_BT_SEARCH_FN(dictMode, mls)                                                                                  \

#define GEN_ZSTD_HC_SEARCH_FN(dictMode, mls)                                                                                 \

#define GEN_ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog)                                                                                          \

#define ZSTD_FOR_EACH_ROWLOG(X, dictMode, mls)

#define ZSTD_FOR_EACH_MLS_ROWLOG(X, dictMode)

#define ZSTD_FOR_EACH_MLS(X, dictMode)

#define ZSTD_FOR_EACH_DICT_MODE(X, ...)

/* Generate row search fns for each combination of (dictMode, mls, rowLog) */
ZSTD_FOR_EACH_DICT_MODE()
/* Generate binary Tree search fns for each combination of (dictMode, mls) */
ZSTD_FOR_EACH_DICT_MODE()
/* Generate hash chain search fns for each combination of (dictMode, mls) */
ZSTD_FOR_EACH_DICT_MODE()

typedef enum { search_hashChain=0, search_binaryTree=1, search_rowHash=2 } searchMethod_e;

#define GEN_ZSTD_CALL_BT_SEARCH_FN(dictMode, mls)
#define GEN_ZSTD_CALL_HC_SEARCH_FN(dictMode, mls)
#define GEN_ZSTD_CALL_ROW_SEARCH_FN(dictMode, mls, rowLog)

#define ZSTD_SWITCH_MLS(X, dictMode)

#define ZSTD_SWITCH_ROWLOG(dictMode, mls)

#define ZSTD_SWITCH_SEARCH_METHOD(dictMode)

/**
 * Searches for the longest match at @p ip.
 * Dispatches to the correct implementation function based on the
 * (searchMethod, dictMode, mls, rowLog). We use switch statements
 * here instead of using an indirect function call through a function
 * pointer because after Spectre and Meltdown mitigations, indirect
 * function calls can be very costly, especially in the kernel.
 *
 * NOTE: dictMode and searchMethod should be templated, so those switch
 * statements should be optimized out. Only the mls & rowLog switches
 * should be left.
 *
 * @param ms The match state.
 * @param ip The position to search at.
 * @param iend The end of the input data.
 * @param[out] offsetPtr Stores the match offset into this pointer.
 * @param mls The minimum search length, in the range [4, 6].
 * @param rowLog The row log (if applicable), in the range [4, 6].
 * @param searchMethod The search method to use (templated).
 * @param dictMode The dictMode (templated).
 *
 * @returns The length of the longest match found, or < mls if no match is found.
 * If a match is found its offset is stored in @p offsetPtr.
 */
FORCE_INLINE_TEMPLATE size_t ZSTD_searchMax(
    ZSTD_matchState_t* ms,
    const BYTE* ip,
    const BYTE* iend,
    size_t* offsetPtr,
    U32 const mls,
    U32 const rowLog,
    searchMethod_e const searchMethod,
    ZSTD_dictMode_e const dictMode)
{}
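
/* Hedged sketch of the switch-based dispatch described above, calling the
 * force-inlined templates directly for brevity (the real ZSTD_searchMax body
 * is generated by ZSTD_SWITCH_SEARCH_METHOD and friends, and dispatches to the
 * outlined (dictMode, mls, rowLog) specializations instead): */
MEM_STATIC size_t ZSTD_searchMax_dispatchSketch(
    ZSTD_matchState_t* ms,
    const BYTE* ip, const BYTE* iend,
    size_t* offsetPtr,
    U32 const mls, U32 const rowLog,
    searchMethod_e const searchMethod,
    ZSTD_dictMode_e const dictMode)
{
    switch (searchMethod) {
    case search_binaryTree:
        return ZSTD_BtFindBestMatch(ms, ip, iend, offsetPtr, mls, dictMode);
    case search_rowHash:
        return ZSTD_RowFindBestMatch(ms, ip, iend, offsetPtr, mls, dictMode, rowLog);
    case search_hashChain:
    default:
        return ZSTD_HcFindBestMatch(ms, ip, iend, offsetPtr, mls, dictMode);
    }
}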

/* *******************************
*  Common parser - lazy strategy
*********************************/

FORCE_INLINE_TEMPLATE
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_compressBlock_lazy_generic(
                        ZSTD_matchState_t* ms, seqStore_t* seqStore,
                        U32 rep[ZSTD_REP_NUM],
                        const void* src, size_t srcSize,
                        const searchMethod_e searchMethod, const U32 depth,
                        ZSTD_dictMode_e const dictMode)
{}
#endif /* build exclusions */


#ifndef ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR
size_t ZSTD_compressBlock_greedy(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{}

size_t ZSTD_compressBlock_greedy_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{}

size_t ZSTD_compressBlock_greedy_dedicatedDictSearch(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{}

size_t ZSTD_compressBlock_greedy_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{}

size_t ZSTD_compressBlock_greedy_dictMatchState_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{}

size_t ZSTD_compressBlock_greedy_dedicatedDictSearch_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{}
#endif

#ifndef ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR
size_t ZSTD_compressBlock_lazy(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{}

size_t ZSTD_compressBlock_lazy_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{}

size_t ZSTD_compressBlock_lazy_dedicatedDictSearch(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{}

size_t ZSTD_compressBlock_lazy_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{}

size_t ZSTD_compressBlock_lazy_dictMatchState_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{}

size_t ZSTD_compressBlock_lazy_dedicatedDictSearch_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{}
#endif

#ifndef ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR
size_t ZSTD_compressBlock_lazy2(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{}

size_t ZSTD_compressBlock_lazy2_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{}

size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{}

size_t ZSTD_compressBlock_lazy2_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{}

size_t ZSTD_compressBlock_lazy2_dictMatchState_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{}

size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{}
#endif

#ifndef ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR
size_t ZSTD_compressBlock_btlazy2(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{}

size_t ZSTD_compressBlock_btlazy2_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{}
#endif

#if !defined(ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR) \
 || !defined(ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR) \
 || !defined(ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR) \
 || !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR)
FORCE_INLINE_TEMPLATE
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_compressBlock_lazy_extDict_generic(
                        ZSTD_matchState_t* ms, seqStore_t* seqStore,
                        U32 rep[ZSTD_REP_NUM],
                        const void* src, size_t srcSize,
                        const searchMethod_e searchMethod, const U32 depth)
{}
#endif /* build exclusions */

#ifndef ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR
size_t ZSTD_compressBlock_greedy_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{}

size_t ZSTD_compressBlock_greedy_extDict_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{}
#endif

#ifndef ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR
size_t ZSTD_compressBlock_lazy_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)

{}

size_t ZSTD_compressBlock_lazy_extDict_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)

{}
#endif

#ifndef ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR
size_t ZSTD_compressBlock_lazy2_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)

{}

size_t ZSTD_compressBlock_lazy2_extDict_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{}
#endif

#ifndef ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR
size_t ZSTD_compressBlock_btlazy2_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)

{}
#endif