/* chromium/third_party/lzma_sdk/C/LzmaEnc.c */

/* LzmaEnc.c -- LZMA Encoder
2022-07-15: Igor Pavlov : Public domain */

#include "Precomp.h"

#include <string.h>

/* #define SHOW_STAT */
/* #define SHOW_STAT2 */

#if defined(SHOW_STAT) || defined(SHOW_STAT2)
#include <stdio.h>
#endif

#include "CpuArch.h"
#include "LzmaEnc.h"

#include "LzFind.h"
#ifndef _7ZIP_ST
#include "LzFindMt.h"
#endif

/* The following LzmaEnc_* declarations are the internal LZMA interface used
   by the LZMA2 encoder (Lzma2Enc.c). They are intentionally not exposed in
   LzmaEnc.h; LZMA2 drives the LZMA encoder block-by-block through them. */

/* Prepare the encoder to read from a stream, keeping keepWindowSize bytes of history. */
SRes LzmaEnc_PrepareForLzma2(CLzmaEncHandle pp, ISeqInStream *inStream, UInt32 keepWindowSize,
    ISzAllocPtr alloc, ISzAllocPtr allocBig);
/* Prepare the encoder to read from an in-memory buffer instead of a stream. */
SRes LzmaEnc_MemPrepare(CLzmaEncHandle pp, const Byte *src, SizeT srcLen,
    UInt32 keepWindowSize, ISzAllocPtr alloc, ISzAllocPtr allocBig);
/* Encode one block of at most desiredPackSize packed bytes into dest. */
SRes LzmaEnc_CodeOneMemBlock(CLzmaEncHandle pp, BoolInt reInit,
    Byte *dest, size_t *destLen, UInt32 desiredPackSize, UInt32 *unpackSize);
const Byte *LzmaEnc_GetCurBuf(CLzmaEncHandle pp);
void LzmaEnc_Finish(CLzmaEncHandle pp);
/* Save/restore encoder probability state (used by LZMA2 for state reset decisions). */
void LzmaEnc_SaveState(CLzmaEncHandle pp);
void LzmaEnc_RestoreState(CLzmaEncHandle pp);

#ifdef SHOW_STAT
/* Running offset counter for the SHOW_STAT debug printout. */
static unsigned g_STAT_OFFSET = 0;
#endif

/* NOTE(review): every #define below has lost its replacement value relative
   to upstream LzmaEnc.c (2022-07-15). As written they are empty object-like
   macros, so any expression using them (e.g. kTopValue, kBitModelTotal) will
   not compile. The values must be restored from upstream before this file
   can build. */

/* for good normalization speed we still reserve 256 MB before 4 GB range */
#define kLzmaMaxHistorySize

#define kNumTopBits
#define kTopValue

#define kNumBitModelTotalBits
#define kBitModelTotal
#define kNumMoveBits
#define kProbInitValue

#define kNumMoveReducingBits
#define kNumBitPriceShiftBits
// #define kBitPrice (1 << kNumBitPriceShiftBits)

#define REP_LEN_COUNT

/* NOTE(review): the three function bodies below are empty — the
   implementations appear to have been stripped from this copy. Upstream
   versions fill *p with "unset" sentinel values (Init), resolve those
   sentinels to concrete defaults for the chosen level (Normalize), and
   return the normalized dictionary size (GetDictSize). Restore from
   upstream LzmaEnc.c — TODO confirm against the matching SDK revision. */

/* Reset all properties to their default/"unset" values. */
void LzmaEncProps_Init(CLzmaEncProps *p)
{}

/* Resolve unset properties to concrete values (in upstream: based on level). */
void LzmaEncProps_Normalize(CLzmaEncProps *p)
{}

/* Return the dictionary size that Normalize would select for props2. */
UInt32 LzmaEncProps_GetDictSize(const CLzmaEncProps *props2)
{}


/*
x86/x64:

BSR:
  IF (SRC == 0) ZF = 1, DEST is undefined;
                  AMD : DEST is unchanged;
  IF (SRC != 0) ZF = 0; DEST is index of top non-zero bit
  BSR is slow in some processors

LZCNT:
  IF (SRC  == 0) CF = 1, DEST is size_in_bits_of_register(src) (32 or 64)
  IF (SRC  != 0) CF = 0, DEST = num_lead_zero_bits
  IF (DEST == 0) ZF = 1;

LZCNT works only in new processors starting from Haswell.
if LZCNT is not supported by processor, then it's executed as BSR.
LZCNT can be faster than BSR, if supported.
*/

// #define LZMA_LOG_BSR

/* Enable the hardware count-leading-zeros path only on ARM/ARM64 (the x86
   alternative is commented out), and only for compilers known to emit a
   good clz sequence: clang >= 6, gcc >= 6, or MSVC >= VS2003 (1300). */
#if defined(MY_CPU_ARM_OR_ARM64) /* || defined(MY_CPU_X86_OR_AMD64) */

  #if (defined(__clang__) && (__clang_major__ >= 6)) \
      || (defined(__GNUC__) && (__GNUC__ >= 6))
      #define LZMA_LOG_BSR
  #elif defined(_MSC_VER) && (_MSC_VER >= 1300)
    // #if defined(MY_CPU_ARM_OR_ARM64)
      #define LZMA_LOG_BSR
    // #endif
  #endif
#endif

// #include <intrin.h>

#ifdef LZMA_LOG_BSR

/* NOTE(review): in this branch the MY_clz / BSR2_RET / GetPosSlot* macro
   bodies have been stripped (empty defines). Upstream maps MY_clz to
   __builtin_clz / _CountLeadingZeros and derives the LZMA position slot
   (2 * log2(pos) + next bit) from it. Restore the macro bodies from
   upstream before building — as written GetPosSlot1 returns an
   uninitialized value. */

#if defined(__clang__) \
    || defined(__GNUC__)

/*
  C code:                  : (30 - __builtin_clz(x))
    gcc9/gcc10 for x64 /x86  : 30 - (bsr(x) xor 31)
    clang10 for x64          : 31 + (bsr(x) xor -32)
*/

  #define MY_clz
  // __lzcnt32
  // __builtin_ia32_lzcnt_u32

#else  // #if defined(_MSC_VER)

  #ifdef MY_CPU_ARM_OR_ARM64

    #define MY_clz

  #else // if defined(MY_CPU_X86_OR_AMD64)

    // #define MY_clz  __lzcnt  // we can use lzcnt (unsupported by old CPU)
    // _BitScanReverse code is not optimal for some MSVC compilers
    #define BSR2_RET

  #endif // MY_CPU_X86_OR_AMD64

#endif // _MSC_VER


#ifndef BSR2_RET

    #define BSR2_RET

#endif


/* Return the position slot for pos via the BSR2_RET macro.
   NOTE(review): with BSR2_RET stripped to an empty macro, res is never
   assigned here — reading it is undefined behavior until the macro body
   is restored. */
unsigned GetPosSlot1(UInt32 pos);
unsigned GetPosSlot1(UInt32 pos)
{
  unsigned res;
  BSR2_RET(pos, res);
  return res;
}
#define GetPosSlot2
#define GetPosSlot

#else // ! LZMA_LOG_BSR

/* NOTE(review): this fallback branch (no hardware clz) is also stripped:
   kNumLogBits, the g_FastPos table initializer, the active BSR2_RET and the
   GetPosSlot* macros all have empty bodies. Upstream precomputes a lookup
   table (LzmaEnc_FastPosInit) and indexes it to get the position slot.
   Restore from upstream before building. */

#define kNumLogBits

#define kDicLogSizeMaxCompress

/* Upstream: fills g_FastPos with precomputed slot values (body stripped here). */
static void LzmaEnc_FastPosInit(Byte *g_FastPos)
{}

/* we can use ((limit - pos) >> 31) only if (pos < ((UInt32)1 << 31)) */
/*
#define BSR2_RET(pos, res) { unsigned zz = 6 + ((kNumLogBits - 1) & \
  (0 - (((((UInt32)1 << (kNumLogBits + 6)) - 1) - pos) >> 31))); \
  res = p->g_FastPos[pos >> zz] + (zz * 2); }
*/

/*
#define BSR2_RET(pos, res) { unsigned zz = 6 + ((kNumLogBits - 1) & \
  (0 - (((((UInt32)1 << (kNumLogBits)) - 1) - (pos >> 6)) >> 31))); \
  res = p->g_FastPos[pos >> zz] + (zz * 2); }
*/

#define BSR2_RET(pos, res)

/*
#define BSR2_RET(pos, res) { res = (pos < (1 << (kNumLogBits + 6))) ? \
  p->g_FastPos[pos >> 6] + 12 : \
  p->g_FastPos[pos >> (6 + kNumLogBits - 1)] + (6 + (kNumLogBits - 1)) * 2; }
*/

#define GetPosSlot1(pos)
#define GetPosSlot2(pos, res)
#define GetPosSlot(pos, res)

#endif // LZMA_LOG_BSR


/* NOTE(review): this whole region is stripped. The bare identifiers ending
   in ';' below (CState; CExtra; COptimal; CLzmaProb; CLenEnc; CLenPriceEnc;
   CRangeEnc; CSaveState; CProbPrice; CLzmaEnc; ...) are the trailing names
   of typedef declarations whose struct bodies / underlying types were
   deleted — they are not valid C as written. Likewise every #define has
   lost its value. All of it must be restored from upstream LzmaEnc.c. */

#define LZMA_NUM_REPS

CState;
CExtra;

COptimal;


// 18.06
#define kNumOpts
#define kPackReserve
// #define kNumOpts (1 << 12)
// #define kPackReserve (1 + kNumOpts * 2)

#define kNumLenToPosStates
#define kNumPosSlotBits
// #define kDicLogSizeMin 0
#define kDicLogSizeMax
#define kDistTableSizeMax

#define kNumAlignBits
#define kAlignTableSize
#define kAlignMask

#define kStartPosModelIndex
#define kEndPosModelIndex
#define kNumFullDistances

CLzmaProb;

#define LZMA_PB_MAX
#define LZMA_LC_MAX
#define LZMA_LP_MAX

#define LZMA_NUM_PB_STATES_MAX

#define kLenNumLowBits
#define kLenNumLowSymbols
#define kLenNumHighBits
#define kLenNumHighSymbols
#define kLenNumSymbolsTotal

#define LZMA_MATCH_LEN_MIN
#define LZMA_MATCH_LEN_MAX

#define kNumStates


CLenEnc;


CLenPriceEnc;

#define GET_PRICE_LEN(p, posState, len)

/*
#define GET_PRICE_LEN(p, posState, len) \
    ((p)->prices2[(size_t)(len) - 2] + ((p)->prices1[posState][((len) - 2) & (kLenNumLowSymbols * 2 - 1)] & (((len) - 2 - kLenNumLowSymbols * 2) >> 9)))
*/

CRangeEnc;


CSaveState;


CProbPrice;


CLzmaEnc;


/* NOTE(review): stripped region. MFB (match-finder base accessor), the
   state-save stubs, the kState_* constants and the four state-transition
   tables below have all lost their bodies/values; the "=;" initializers are
   invalid C as written. Restore from upstream LzmaEnc.c. */

#define MFB
/*
#ifndef _7ZIP_ST
#define MFB (p->matchFinderMt.MatchFinder)
#endif
*/

#define COPY_ARR(dest, src, arr)

/* Upstream: copies probability models and rep distances into p->saveState. */
void LzmaEnc_SaveState(CLzmaEncHandle pp)
{}


/* Upstream: restores the state captured by LzmaEnc_SaveState. */
void LzmaEnc_RestoreState(CLzmaEncHandle pp)
{}



/* Upstream: validates props2, normalizes it, and configures the encoder. */
SRes LzmaEnc_SetProps(CLzmaEncHandle pp, const CLzmaEncProps *props2)
{}


/* Upstream: records the expected total input size (hint for the match finder). */
void LzmaEnc_SetDataSize(CLzmaEncHandle pp, UInt64 expectedDataSiize)
{}


#define kState_Start
#define kState_LitAfterMatch
#define kState_LitAfterRep
#define kState_MatchAfterLit
#define kState_RepAfterLit

/* LZMA state-machine transition tables — initializers stripped ("=;"). */
static const Byte kLiteralNextStates[kNumStates] =;
static const Byte kMatchNextStates[kNumStates]   =;
static const Byte kRepNextStates[kNumStates]     =;
static const Byte kShortRepNextStates[kNumStates]=;

#define IsLitState(s)
#define GetLenToPosState2(len)
#define GetLenToPosState(len)

#define kInfinityPrice

/* NOTE(review): range-encoder region — all function bodies and macro values
   stripped. Upstream implements the classic LZMA binary range coder here
   (construct/alloc/init, ShiftLow carry propagation, buffered flush to the
   output stream). Restore from upstream LzmaEnc.c. */

static void RangeEnc_Construct(CRangeEnc *p)
{}

#define RangeEnc_GetProcessed(p)
#define RangeEnc_GetProcessed_sizet(p)

#define RC_BUF_SIZE

/* Upstream: allocates the RC_BUF_SIZE output buffer; returns 0 on failure. */
static int RangeEnc_Alloc(CRangeEnc *p, ISzAllocPtr alloc)
{}

static void RangeEnc_Free(CRangeEnc *p, ISzAllocPtr alloc)
{}

static void RangeEnc_Init(CRangeEnc *p)
{}

/* Upstream: writes the filled buffer to p->outStream, tracking write errors. */
MY_NO_INLINE static void RangeEnc_FlushStream(CRangeEnc *p)
{}

/* Upstream: emits one byte of the range-coder low register with carry handling. */
MY_NO_INLINE static void MY_FAST_CALL RangeEnc_ShiftLow(CRangeEnc *p)
{}

/* Upstream: flushes the final 5 bytes of coder state at end of stream. */
static void RangeEnc_FlushData(CRangeEnc *p)
{}

#define RC_NORM(p)

#define RC_BIT_PRE(p, prob)

// #define _LZMA_ENC_USE_BRANCH

#ifdef _LZMA_ENC_USE_BRANCH

#define RC_BIT

#else

#define RC_BIT(p, prob, bit)

#endif




/* NOTE(review): stripped region — RC_BIT_* macro bodies, the literal/length
   encoders and the price-table routines are all empty. Also note the stray
   trailing backslash on the RC_BIT_1_BASE line: it continues the (empty)
   macro onto the following blank line — a leftover of the stripped
   multi-line definition. Restore from upstream LzmaEnc.c. */

#define RC_BIT_0_BASE(p, prob)

#define RC_BIT_1_BASE(p, prob) \

#define RC_BIT_0(p, prob)

#define RC_BIT_1(p, prob)

/* Upstream: encodes a 0 bit with probability model *prob. */
static void RangeEnc_EncodeBit_0(CRangeEnc *p, CLzmaProb *prob)
{}

/* Upstream: encodes the 8 bits of sym through the literal probability tree. */
static void LitEnc_Encode(CRangeEnc *p, CLzmaProb *probs, UInt32 sym)
{}

/* Upstream: literal coding in "matched" mode, mixing in bits of matchByte. */
static void LitEnc_EncodeMatched(CRangeEnc *p, CLzmaProb *probs, UInt32 sym, UInt32 matchByte)
{}



/* Upstream: fills ProbPrices[] with -log2 bit-cost approximations. */
static void LzmaEnc_InitPriceTables(CProbPrice *ProbPrices)
{}


#define GET_PRICE(prob, bit)

#define GET_PRICEa(prob, bit)

#define GET_PRICE_0(prob)
#define GET_PRICE_1(prob)

#define GET_PRICEa_0(prob)
#define GET_PRICEa_1(prob)


/* Upstream: price (bit cost) of coding literal sym with the given tree. */
static UInt32 LitEnc_GetPrice(const CLzmaProb *probs, UInt32 sym, const CProbPrice *ProbPrices)
{}


/* Upstream: price of coding sym in matched-literal mode against matchByte. */
static UInt32 LitEnc_Matched_GetPrice(const CLzmaProb *probs, UInt32 sym, UInt32 matchByte, const CProbPrice *ProbPrices)
{}


/* Upstream: encodes sym bit-reversed through a numBits-deep probability tree. */
static void RcTree_ReverseEncode(CRangeEnc *rc, CLzmaProb *probs, unsigned numBits, unsigned sym)
{}



/* Upstream: resets all length-coder probability models to kProbInitValue. */
static void LenEnc_Init(CLenEnc *p)
{}

/* Upstream: encodes a match length (low/mid/high symbol ranges). */
static void LenEnc_Encode(CLenEnc *p, CRangeEnc *rc, unsigned sym, unsigned posState)
{}

/* Upstream: computes prices for a 3-bit probability tree into prices[]. */
static void SetPrices_3(const CLzmaProb *probs, UInt32 startPrice, UInt32 *prices, const CProbPrice *ProbPrices)
{}


/* Upstream: rebuilds the cached length-price tables for all pos states. */
MY_NO_INLINE static void MY_FAST_CALL LenPriceEnc_UpdateTables(
    CLenPriceEnc *p,
    unsigned numPosStates,
    const CLenEnc *enc,
    const CProbPrice *ProbPrices)
{}

/*
  #ifdef SHOW_STAT
  g_STAT_OFFSET += num;
  printf("\n MovePos %u", num);
  #endif
*/
  
/* NOTE(review): stripped region — the optimal-parse core. MOVE_POS,
   ReadMatchDistances, the COptimal marker macros, the rep-price helpers and
   GetOptimum/GetOptimumFast all have empty bodies/values. This is the heart
   of the encoder; restore from upstream LzmaEnc.c. */

#define MOVE_POS(p, num)


/* Upstream: fetches match candidates from the match finder; returns the
   number of available bytes and stores pair count in *numPairsRes. */
static unsigned ReadMatchDistances(CLzmaEnc *p, unsigned *numPairsRes)
{}

#define MARK_LIT

#define MakeAs_Lit(p)
#define MakeAs_ShortRep(p)
#define IsShortRep(p)


#define GetPrice_ShortRep(p, state, posState)

#define GetPrice_Rep_0(p, state, posState)
  
/* Upstream: price of coding a repeat match with rep index repIndex. */
MY_FORCE_INLINE
static UInt32 GetPrice_PureRep(const CLzmaEnc *p, unsigned repIndex, size_t state, size_t posState)
{}


/* Upstream: walks the COptimal chain backwards to recover the chosen path. */
static unsigned Backward(CLzmaEnc *p, unsigned cur)
{}



#define LIT_PROBS(pos, prevByte)


/* Upstream: dynamic-programming optimal parser — picks the cheapest
   literal/match/rep decision sequence for the current position. */
static unsigned GetOptimum(CLzmaEnc *p, UInt32 position)
{}



#define ChangePair(smallDist, bigDist)



/* Upstream: greedy heuristic parser used by the fast compression mode. */
static unsigned GetOptimumFast(CLzmaEnc *p)
{}




/* NOTE(review): stripped region — end-marker/flush helpers, price-table
   fillers and encoder lifecycle functions all have empty bodies. Restore
   from upstream LzmaEnc.c. */

/* Upstream: emits the LZMA end-of-stream marker (distance 0xFFFFFFFF). */
static void WriteEndMarker(CLzmaEnc *p, unsigned posState)
{}


/* Upstream: folds match-finder and range-coder error flags into p->result. */
static SRes CheckErrors(CLzmaEnc *p)
{}


/* Upstream: writes the optional end marker, flushes the range coder. */
MY_NO_INLINE static SRes Flush(CLzmaEnc *p, UInt32 nowPos)
{}


/* Upstream: recomputes the cached align-bit price table. */
MY_NO_INLINE static void FillAlignPrices(CLzmaEnc *p)
{}


/* Upstream: recomputes the cached distance-slot/full-distance price tables. */
MY_NO_INLINE static void FillDistancesPrices(CLzmaEnc *p)
{}



/* Upstream: zero/default-initializes a CLzmaEnc (no allocation). */
static void LzmaEnc_Construct(CLzmaEnc *p)
{}

/* Upstream: allocates and constructs an encoder handle; NULL on OOM. */
CLzmaEncHandle LzmaEnc_Create(ISzAllocPtr alloc)
{}

/* Upstream: frees the literal probability tables. */
static void LzmaEnc_FreeLits(CLzmaEnc *p, ISzAllocPtr alloc)
{}

/* Upstream: releases match finder, literals and range-coder buffers. */
static void LzmaEnc_Destruct(CLzmaEnc *p, ISzAllocPtr alloc, ISzAllocPtr allocBig)
{}

/* Upstream: destructs and frees the handle created by LzmaEnc_Create. */
void LzmaEnc_Destroy(CLzmaEncHandle p, ISzAllocPtr alloc, ISzAllocPtr allocBig)
{}


/* Upstream: main encode loop — parses and range-codes symbols until the
   pack/unpack size limits are reached or input is exhausted. */
MY_NO_INLINE
static SRes LzmaEnc_CodeOneBlock(CLzmaEnc *p, UInt32 maxPackSize, UInt32 maxUnpackSize)
{}



/* NOTE(review): stripped region — allocation/initialization/preparation
   functions all have empty bodies, kBigHashDicLimit has no value, and
   CLzmaEnc_SeqOutStreamBuf is the bare tail of a deleted typedef (invalid C
   as written). Restore from upstream LzmaEnc.c. */

#define kBigHashDicLimit

/* Upstream: allocates range-coder buffer, literal tables and match finder. */
static SRes LzmaEnc_Alloc(CLzmaEnc *p, UInt32 keepWindowSize, ISzAllocPtr alloc, ISzAllocPtr allocBig)
{}

/* Upstream: resets all probability models, reps and coder state. */
static void LzmaEnc_Init(CLzmaEnc *p)
{}


/* Upstream: initializes the cached price tables after LzmaEnc_Init. */
static void LzmaEnc_InitPrices(CLzmaEnc *p)
{}

static SRes LzmaEnc_AllocAndInit(CLzmaEnc *p, UInt32 keepWindowSize, ISzAllocPtr alloc, ISzAllocPtr allocBig)
{}

/* Upstream: binds streams and runs AllocAndInit for streamed encoding. */
static SRes LzmaEnc_Prepare(CLzmaEncHandle pp, ISeqOutStream *outStream, ISeqInStream *inStream,
    ISzAllocPtr alloc, ISzAllocPtr allocBig)
{}

SRes LzmaEnc_PrepareForLzma2(CLzmaEncHandle pp,
    ISeqInStream *inStream, UInt32 keepWindowSize,
    ISzAllocPtr alloc, ISzAllocPtr allocBig)
{}

/* Upstream: points the match finder directly at an in-memory input buffer. */
static void LzmaEnc_SetInputBuf(CLzmaEnc *p, const Byte *src, SizeT srcLen)
{}

SRes LzmaEnc_MemPrepare(CLzmaEncHandle pp, const Byte *src, SizeT srcLen,
    UInt32 keepWindowSize, ISzAllocPtr alloc, ISzAllocPtr allocBig)
{}

/* Upstream: stops multithreaded match-finder workers (no-op in ST build). */
void LzmaEnc_Finish(CLzmaEncHandle pp)
{}


/* NOTE(review): bare typedef tail — struct body stripped. */
CLzmaEnc_SeqOutStreamBuf;

/* Upstream: ISeqOutStream vtable shim that copies into a fixed buffer,
   tracking overflow. */
static size_t SeqOutStreamBuf_Write(const ISeqOutStream *pp, const void *data, size_t size)
{}


/*
UInt32 LzmaEnc_GetNumAvailableBytes(CLzmaEncHandle pp)
{
  const CLzmaEnc *p = (CLzmaEnc *)pp;
  return p->matchFinder.GetNumAvailableBytes(p->matchFinderObj);
}
*/

/* NOTE(review): stripped region — the public encoding entry points all have
   empty bodies. Restore from upstream LzmaEnc.c. */

/* Upstream: returns the match finder's current read pointer (LZMA2 uses it
   to copy uncompressed chunks). */
const Byte *LzmaEnc_GetCurBuf(CLzmaEncHandle pp)
{}


// (desiredPackSize == 0) is not allowed
/* Upstream: encodes one LZMA2-sized block into dest, optionally re-initializing
   coder state, reporting packed (*destLen) and unpacked (*unpackSize) sizes. */
SRes LzmaEnc_CodeOneMemBlock(CLzmaEncHandle pp, BoolInt reInit,
    Byte *dest, size_t *destLen, UInt32 desiredPackSize, UInt32 *unpackSize)
{}


/* Upstream: encodes the whole input, reporting progress via ICompressProgress. */
MY_NO_INLINE
static SRes LzmaEnc_Encode2(CLzmaEnc *p, ICompressProgress *progress)
{}


/* Upstream: Prepare + Encode2 convenience wrapper for streamed encoding. */
SRes LzmaEnc_Encode(CLzmaEncHandle pp, ISeqOutStream *outStream, ISeqInStream *inStream, ICompressProgress *progress,
    ISzAllocPtr alloc, ISzAllocPtr allocBig)
{}


/* Upstream: serializes the 5-byte LZMA properties header (lc/lp/pb + dictSize). */
SRes LzmaEnc_WriteProperties(CLzmaEncHandle pp, Byte *props, SizeT *size)
{}


/* Upstream: returns whether the encoder writes an end-of-stream marker. */
unsigned LzmaEnc_IsWriteEndMark(CLzmaEncHandle pp)
{}


/* Upstream: one-shot memory-to-memory encode using an internal out-stream shim. */
SRes LzmaEnc_MemEncode(CLzmaEncHandle pp, Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
    int writeEndMark, ICompressProgress *progress, ISzAllocPtr alloc, ISzAllocPtr allocBig)
{}


/* Upstream: top-level convenience — create encoder, set props, write the
   properties header, encode, destroy. */
SRes LzmaEncode(Byte *dest, SizeT *destLen, const Byte *src, SizeT srcLen,
    const CLzmaEncProps *props, Byte *propsEncoded, SizeT *propsSize, int writeEndMark,
    ICompressProgress *progress, ISzAllocPtr alloc, ISzAllocPtr allocBig)
{}


/*
#ifndef _7ZIP_ST
void LzmaEnc_GetLzThreads(CLzmaEncHandle pp, HANDLE lz_threads[2])
{
  const CLzmaEnc *p = (CLzmaEnc *)pp;
  lz_threads[0] = p->matchFinderMt.hashSync.thread;
  lz_threads[1] = p->matchFinderMt.btSync.thread;
}
#endif
*/