/* chromium/third_party/ffmpeg/libavutil/intreadwrite.h */

/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVUTIL_INTREADWRITE_H
#define AVUTIL_INTREADWRITE_H

#include <stdint.h>
#include "libavutil/avconfig.h"
#include "attributes.h"
#include "bswap.h"

/*
 * Aliasing unions used by the AV_[RW]N*A and AV_(COPY|SWAP|ZERO)* macros
 * below to type-pun between byte buffers and 16/32/64-bit scalars.
 * The av_alias attribute (may_alias on GCC/Clang, from attributes.h)
 * keeps these accesses legal under strict aliasing.
 */

/* NOTE(review): av_alias is normally defined by attributes.h, included
 * above; this no-op fallback only covers toolchains where it is absent. */
#ifndef av_alias
#   define av_alias
#endif

typedef union {
    uint64_t u64;
    uint32_t u32[2];
    uint16_t u16[4];
    uint8_t  u8 [8];
    double   f64;
    float    f32[2];
} av_alias av_alias64;

typedef union {
    uint32_t u32;
    uint16_t u16[2];
    uint8_t  u8 [4];
    float    f32;
} av_alias av_alias32;

typedef union {
    uint16_t u16;
    uint8_t  u8 [2];
} av_alias av_alias16;

/*
 * Arch-specific headers can provide any combination of
 * AV_[RW][BLN](16|24|32|48|64) and AV_(COPY|SWAP|ZERO)(64|128) macros.
 * Preprocessor symbols must be defined, even if these are implemented
 * as inline functions.
 *
 * R/W means read/write, B/L/N means big/little/native endianness.
 * The following macros require aligned access, compared to their
 * unaligned variants: AV_(COPY|SWAP|ZERO)(64|128), AV_[RW]N[8-64]A.
 * Incorrect usage may range from abysmal performance to crash
 * depending on the platform.
 *
 * The unaligned variants are AV_[RW][BLN][8-64] and AV_COPY*U.
 */

#ifdef HAVE_AV_CONFIG_H

#include "config.h"

#if   ARCH_ARM
#   include "arm/intreadwrite.h"
#elif ARCH_AVR32
#   include "avr32/intreadwrite.h"
#elif ARCH_MIPS
#   include "mips/intreadwrite.h"
#elif ARCH_PPC
#   include "ppc/intreadwrite.h"
#elif ARCH_X86
#   include "x86/intreadwrite.h"
#endif

#endif /* HAVE_AV_CONFIG_H */

/*
 * Map AV_RNXX <-> AV_R[BL]XX for all variants provided by per-arch headers.
 */

#if AV_HAVE_BIGENDIAN

/* Native order == big endian: RN/WN and RB/WB are interchangeable, so
 * define whichever of each pair the per-arch header did not provide. */

#   if    defined(AV_RN16) && !defined(AV_RB16)
#       define AV_RB16(p) AV_RN16(p)
#   elif !defined(AV_RN16) &&  defined(AV_RB16)
#       define AV_RN16(p) AV_RB16(p)
#   endif

#   if    defined(AV_WN16) && !defined(AV_WB16)
#       define AV_WB16(p, v) AV_WN16(p, v)
#   elif !defined(AV_WN16) &&  defined(AV_WB16)
#       define AV_WN16(p, v) AV_WB16(p, v)
#   endif

#   if    defined(AV_RN24) && !defined(AV_RB24)
#       define AV_RB24(p) AV_RN24(p)
#   elif !defined(AV_RN24) &&  defined(AV_RB24)
#       define AV_RN24(p) AV_RB24(p)
#   endif

#   if    defined(AV_WN24) && !defined(AV_WB24)
#       define AV_WB24(p, v) AV_WN24(p, v)
#   elif !defined(AV_WN24) &&  defined(AV_WB24)
#       define AV_WN24(p, v) AV_WB24(p, v)
#   endif

#   if    defined(AV_RN32) && !defined(AV_RB32)
#       define AV_RB32(p) AV_RN32(p)
#   elif !defined(AV_RN32) &&  defined(AV_RB32)
#       define AV_RN32(p) AV_RB32(p)
#   endif

#   if    defined(AV_WN32) && !defined(AV_WB32)
#       define AV_WB32(p, v) AV_WN32(p, v)
#   elif !defined(AV_WN32) &&  defined(AV_WB32)
#       define AV_WN32(p, v) AV_WB32(p, v)
#   endif

#   if    defined(AV_RN48) && !defined(AV_RB48)
#       define AV_RB48(p) AV_RN48(p)
#   elif !defined(AV_RN48) &&  defined(AV_RB48)
#       define AV_RN48(p) AV_RB48(p)
#   endif

#   if    defined(AV_WN48) && !defined(AV_WB48)
#       define AV_WB48(p, v) AV_WN48(p, v)
#   elif !defined(AV_WN48) &&  defined(AV_WB48)
#       define AV_WN48(p, v) AV_WB48(p, v)
#   endif

#   if    defined(AV_RN64) && !defined(AV_RB64)
#       define AV_RB64(p) AV_RN64(p)
#   elif !defined(AV_RN64) &&  defined(AV_RB64)
#       define AV_RN64(p) AV_RB64(p)
#   endif

#   if    defined(AV_WN64) && !defined(AV_WB64)
#       define AV_WB64(p, v) AV_WN64(p, v)
#   elif !defined(AV_WN64) &&  defined(AV_WB64)
#       define AV_WN64(p, v) AV_WB64(p, v)
#   endif

#else /* AV_HAVE_BIGENDIAN */

/* Native order == little endian: RN/WN and RL/WL are interchangeable. */

#   if    defined(AV_RN16) && !defined(AV_RL16)
#       define AV_RL16(p) AV_RN16(p)
#   elif !defined(AV_RN16) &&  defined(AV_RL16)
#       define AV_RN16(p) AV_RL16(p)
#   endif

#   if    defined(AV_WN16) && !defined(AV_WL16)
#       define AV_WL16(p, v) AV_WN16(p, v)
#   elif !defined(AV_WN16) &&  defined(AV_WL16)
#       define AV_WN16(p, v) AV_WL16(p, v)
#   endif

#   if    defined(AV_RN24) && !defined(AV_RL24)
#       define AV_RL24(p) AV_RN24(p)
#   elif !defined(AV_RN24) &&  defined(AV_RL24)
#       define AV_RN24(p) AV_RL24(p)
#   endif

#   if    defined(AV_WN24) && !defined(AV_WL24)
#       define AV_WL24(p, v) AV_WN24(p, v)
#   elif !defined(AV_WN24) &&  defined(AV_WL24)
#       define AV_WN24(p, v) AV_WL24(p, v)
#   endif

#   if    defined(AV_RN32) && !defined(AV_RL32)
#       define AV_RL32(p) AV_RN32(p)
#   elif !defined(AV_RN32) &&  defined(AV_RL32)
#       define AV_RN32(p) AV_RL32(p)
#   endif

#   if    defined(AV_WN32) && !defined(AV_WL32)
#       define AV_WL32(p, v) AV_WN32(p, v)
#   elif !defined(AV_WN32) &&  defined(AV_WL32)
#       define AV_WN32(p, v) AV_WL32(p, v)
#   endif

#   if    defined(AV_RN48) && !defined(AV_RL48)
#       define AV_RL48(p) AV_RN48(p)
#   elif !defined(AV_RN48) &&  defined(AV_RL48)
#       define AV_RN48(p) AV_RL48(p)
#   endif

#   if    defined(AV_WN48) && !defined(AV_WL48)
#       define AV_WL48(p, v) AV_WN48(p, v)
#   elif !defined(AV_WN48) &&  defined(AV_WL48)
#       define AV_WN48(p, v) AV_WL48(p, v)
#   endif

#   if    defined(AV_RN64) && !defined(AV_RL64)
#       define AV_RL64(p) AV_RN64(p)
#   elif !defined(AV_RN64) &&  defined(AV_RL64)
#       define AV_RN64(p) AV_RL64(p)
#   endif

#   if    defined(AV_WN64) && !defined(AV_WL64)
#       define AV_WL64(p, v) AV_WN64(p, v)
#   elif !defined(AV_WN64) &&  defined(AV_WL64)
#       define AV_WN64(p, v) AV_WL64(p, v)
#   endif

#endif /* !AV_HAVE_BIGENDIAN */

/*
 * Define AV_[RW]N helper macros to simplify definitions not provided
 * by per-arch headers.
 */

#if defined(__GNUC__) || defined(__clang__)

/* NOTE(review): av_alias is normally defined by attributes.h, included
 * above; this no-op fallback only covers toolchains where it is absent. */
#ifndef av_alias
#   define av_alias
#endif

/* Packed single-member unions: casting a possibly unaligned pointer to
 * one of these makes GCC/Clang emit a safe (and usually fast) unaligned
 * load/store instead of UB-prone direct dereference. */
union unaligned_64 { uint64_t l; } __attribute__((packed)) av_alias;
union unaligned_32 { uint32_t l; } __attribute__((packed)) av_alias;
union unaligned_16 { uint16_t l; } __attribute__((packed)) av_alias;

/* Read/write an s-bit native-endian value at a possibly unaligned p. */
#   define AV_RN(s, p) (((const union unaligned_##s *) (p))->l)
#   define AV_WN(s, p, v) ((((union unaligned_##s *) (p))->l) = (v))

#elif defined(_MSC_VER) && (defined(_M_ARM) || defined(_M_X64) || defined(_M_ARM64)) && AV_HAVE_FAST_UNALIGNED

/* MSVC on ARM/x64: the __unaligned qualifier tells the compiler the
 * pointer may be misaligned. */
#   define AV_RN(s, p) (*((const __unaligned uint##s##_t*)(p)))
#   define AV_WN(s, p, v) (*((__unaligned uint##s##_t*)(p)) = (v))

#elif AV_HAVE_FAST_UNALIGNED

/* Hardware tolerates unaligned access: plain aliased loads/stores. */
#   define AV_RN(s, p) (((const av_alias##s*)(p))->u##s)
#   define AV_WN(s, p, v) (((av_alias##s*)(p))->u##s = (v))

#else

/* No fast unaligned access: assemble values byte by byte.  These also
 * serve as the endian-explicit fallbacks when no arch header provided
 * them. */

#ifndef AV_RB16
#   define AV_RB16(p)                           \
    ((((const uint8_t*)(p))[0] << 8) |          \
      ((const uint8_t*)(p))[1])
#endif
#ifndef AV_WB16
#   define AV_WB16(p, darg) do {                \
        unsigned d = (darg);                    \
        ((uint8_t*)(p))[1] = (d);               \
        ((uint8_t*)(p))[0] = (d)>>8;            \
    } while(0)
#endif

#ifndef AV_RL16
#   define AV_RL16(p)                           \
    ((((const uint8_t*)(p))[1] << 8) |          \
      ((const uint8_t*)(p))[0])
#endif
#ifndef AV_WL16
#   define AV_WL16(p, darg) do {                \
        unsigned d = (darg);                    \
        ((uint8_t*)(p))[0] = (d);               \
        ((uint8_t*)(p))[1] = (d)>>8;            \
    } while(0)
#endif

#ifndef AV_RB32
#   define AV_RB32(p)                                \
    (((uint32_t)((const uint8_t*)(p))[0] << 24) |    \
               (((const uint8_t*)(p))[1] << 16) |    \
               (((const uint8_t*)(p))[2] <<  8) |    \
                ((const uint8_t*)(p))[3])
#endif
#ifndef AV_WB32
#   define AV_WB32(p, darg) do {                \
        unsigned d = (darg);                    \
        ((uint8_t*)(p))[3] = (d);               \
        ((uint8_t*)(p))[2] = (d)>>8;            \
        ((uint8_t*)(p))[1] = (d)>>16;           \
        ((uint8_t*)(p))[0] = (d)>>24;           \
    } while(0)
#endif

#ifndef AV_RL32
#   define AV_RL32(p)                                \
    (((uint32_t)((const uint8_t*)(p))[3] << 24) |    \
               (((const uint8_t*)(p))[2] << 16) |    \
               (((const uint8_t*)(p))[1] <<  8) |    \
                ((const uint8_t*)(p))[0])
#endif
#ifndef AV_WL32
#   define AV_WL32(p, darg) do {                \
        unsigned d = (darg);                    \
        ((uint8_t*)(p))[0] = (d);               \
        ((uint8_t*)(p))[1] = (d)>>8;            \
        ((uint8_t*)(p))[2] = (d)>>16;           \
        ((uint8_t*)(p))[3] = (d)>>24;           \
    } while(0)
#endif

#ifndef AV_RB64
#   define AV_RB64(p)                                   \
    (((uint64_t)((const uint8_t*)(p))[0] << 56) |       \
     ((uint64_t)((const uint8_t*)(p))[1] << 48) |       \
     ((uint64_t)((const uint8_t*)(p))[2] << 40) |       \
     ((uint64_t)((const uint8_t*)(p))[3] << 32) |       \
     ((uint64_t)((const uint8_t*)(p))[4] << 24) |       \
     ((uint64_t)((const uint8_t*)(p))[5] << 16) |       \
     ((uint64_t)((const uint8_t*)(p))[6] <<  8) |       \
      (uint64_t)((const uint8_t*)(p))[7])
#endif
#ifndef AV_WB64
#   define AV_WB64(p, darg) do {                \
        uint64_t d = (darg);                    \
        ((uint8_t*)(p))[7] = (d);               \
        ((uint8_t*)(p))[6] = (d)>>8;            \
        ((uint8_t*)(p))[5] = (d)>>16;           \
        ((uint8_t*)(p))[4] = (d)>>24;           \
        ((uint8_t*)(p))[3] = (d)>>32;           \
        ((uint8_t*)(p))[2] = (d)>>40;           \
        ((uint8_t*)(p))[1] = (d)>>48;           \
        ((uint8_t*)(p))[0] = (d)>>56;           \
    } while(0)
#endif

#ifndef AV_RL64
#   define AV_RL64(p)                                   \
    (((uint64_t)((const uint8_t*)(p))[7] << 56) |       \
     ((uint64_t)((const uint8_t*)(p))[6] << 48) |       \
     ((uint64_t)((const uint8_t*)(p))[5] << 40) |       \
     ((uint64_t)((const uint8_t*)(p))[4] << 32) |       \
     ((uint64_t)((const uint8_t*)(p))[3] << 24) |       \
     ((uint64_t)((const uint8_t*)(p))[2] << 16) |       \
     ((uint64_t)((const uint8_t*)(p))[1] <<  8) |       \
      (uint64_t)((const uint8_t*)(p))[0])
#endif
#ifndef AV_WL64
#   define AV_WL64(p, darg) do {                \
        uint64_t d = (darg);                    \
        ((uint8_t*)(p))[0] = (d);               \
        ((uint8_t*)(p))[1] = (d)>>8;            \
        ((uint8_t*)(p))[2] = (d)>>16;           \
        ((uint8_t*)(p))[3] = (d)>>24;           \
        ((uint8_t*)(p))[4] = (d)>>32;           \
        ((uint8_t*)(p))[5] = (d)>>40;           \
        ((uint8_t*)(p))[6] = (d)>>48;           \
        ((uint8_t*)(p))[7] = (d)>>56;           \
    } while(0)
#endif

/* Native order maps onto whichever explicit order matches the host. */
#if AV_HAVE_BIGENDIAN
#   define AV_RN(s, p)    AV_RB##s(p)
#   define AV_WN(s, p, v) AV_WB##s(p, v)
#else
#   define AV_RN(s, p)    AV_RL##s(p)
#   define AV_WN(s, p, v) AV_WL##s(p, v)
#endif

#endif /* HAVE_FAST_UNALIGNED */

/* Default native-order accessors via the AV_[RW]N(s, ...) helper. */

#ifndef AV_RN16
#   define AV_RN16(p) AV_RN(16, p)
#endif

#ifndef AV_RN32
#   define AV_RN32(p) AV_RN(32, p)
#endif

#ifndef AV_RN64
#   define AV_RN64(p) AV_RN(64, p)
#endif

#ifndef AV_WN16
#   define AV_WN16(p, v) AV_WN(16, p, v)
#endif

#ifndef AV_WN32
#   define AV_WN32(p, v) AV_WN(32, p, v)
#endif

#ifndef AV_WN64
#   define AV_WN64(p, v) AV_WN(64, p, v)
#endif

/* Explicit byte-order accessors: the host-order one is a plain native
 * access, the foreign-order one goes through av_bswap## (bswap.h). */
#if AV_HAVE_BIGENDIAN
#   define AV_RB(s, p)    AV_RN##s(p)
#   define AV_WB(s, p, v) AV_WN##s(p, v)
#   define AV_RL(s, p)    av_bswap##s(AV_RN##s(p))
#   define AV_WL(s, p, v) AV_WN##s(p, av_bswap##s(v))
#else
#   define AV_RB(s, p)    av_bswap##s(AV_RN##s(p))
#   define AV_WB(s, p, v) AV_WN##s(p, av_bswap##s(v))
#   define AV_RL(s, p)    AV_RN##s(p)
#   define AV_WL(s, p, v) AV_WN##s(p, v)
#endif

/* 8-bit accessors: endianness is meaningless for one byte. */
#define AV_RB8(x)     (((const uint8_t*)(x))[0])
#define AV_WB8(p, d)  do { ((uint8_t*)(p))[0] = (d); } while(0)

#define AV_RL8(x)     AV_RB8(x)
#define AV_WL8(p, d)  AV_WB8(p, d)

#ifndef AV_RB16
#   define AV_RB16(p)    AV_RB(16, p)
#endif
#ifndef AV_WB16
#   define AV_WB16(p, v) AV_WB(16, p, v)
#endif

#ifndef AV_RL16
#   define AV_RL16(p)    AV_RL(16, p)
#endif
#ifndef AV_WL16
#   define AV_WL16(p, v) AV_WL(16, p, v)
#endif

#ifndef AV_RB32
#   define AV_RB32(p)    AV_RB(32, p)
#endif
#ifndef AV_WB32
#   define AV_WB32(p, v) AV_WB(32, p, v)
#endif

#ifndef AV_RL32
#   define AV_RL32(p)    AV_RL(32, p)
#endif
#ifndef AV_WL32
#   define AV_WL32(p, v) AV_WL(32, p, v)
#endif

#ifndef AV_RB64
#   define AV_RB64(p)    AV_RB(64, p)
#endif
#ifndef AV_WB64
#   define AV_WB64(p, v) AV_WB(64, p, v)
#endif

#ifndef AV_RL64
#   define AV_RL64(p)    AV_RL(64, p)
#endif
#ifndef AV_WL64
#   define AV_WL64(p, v) AV_WL(64, p, v)
#endif

/* 24- and 48-bit values have no machine type, so they are always
 * assembled byte by byte.
 * WARNING: AV_WB24/AV_WL24 evaluate d multiple times — no side effects
 * in the argument. */
#ifndef AV_RB24
#   define AV_RB24(x)                           \
    ((((const uint8_t*)(x))[0] << 16) |         \
     (((const uint8_t*)(x))[1] <<  8) |         \
      ((const uint8_t*)(x))[2])
#endif
#ifndef AV_WB24
#   define AV_WB24(p, d) do {                   \
        ((uint8_t*)(p))[2] = (d);               \
        ((uint8_t*)(p))[1] = (d)>>8;            \
        ((uint8_t*)(p))[0] = (d)>>16;           \
    } while(0)
#endif

#ifndef AV_RL24
#   define AV_RL24(x)                           \
    ((((const uint8_t*)(x))[2] << 16) |         \
     (((const uint8_t*)(x))[1] <<  8) |         \
      ((const uint8_t*)(x))[0])
#endif
#ifndef AV_WL24
#   define AV_WL24(p, d) do {                   \
        ((uint8_t*)(p))[0] = (d);               \
        ((uint8_t*)(p))[1] = (d)>>8;            \
        ((uint8_t*)(p))[2] = (d)>>16;           \
    } while(0)
#endif

#ifndef AV_RB48
#   define AV_RB48(x)                                     \
    (((uint64_t)((const uint8_t*)(x))[0] << 40) |         \
     ((uint64_t)((const uint8_t*)(x))[1] << 32) |         \
     ((uint64_t)((const uint8_t*)(x))[2] << 24) |         \
     ((uint64_t)((const uint8_t*)(x))[3] << 16) |         \
     ((uint64_t)((const uint8_t*)(x))[4] <<  8) |         \
      (uint64_t)((const uint8_t*)(x))[5])
#endif
#ifndef AV_WB48
#   define AV_WB48(p, darg) do {                \
        uint64_t d = (darg);                    \
        ((uint8_t*)(p))[5] = (d);               \
        ((uint8_t*)(p))[4] = (d)>>8;            \
        ((uint8_t*)(p))[3] = (d)>>16;           \
        ((uint8_t*)(p))[2] = (d)>>24;           \
        ((uint8_t*)(p))[1] = (d)>>32;           \
        ((uint8_t*)(p))[0] = (d)>>40;           \
    } while(0)
#endif

#ifndef AV_RL48
#   define AV_RL48(x)                                     \
    (((uint64_t)((const uint8_t*)(x))[5] << 40) |         \
     ((uint64_t)((const uint8_t*)(x))[4] << 32) |         \
     ((uint64_t)((const uint8_t*)(x))[3] << 24) |         \
     ((uint64_t)((const uint8_t*)(x))[2] << 16) |         \
     ((uint64_t)((const uint8_t*)(x))[1] <<  8) |         \
      (uint64_t)((const uint8_t*)(x))[0])
#endif
#ifndef AV_WL48
#   define AV_WL48(p, darg) do {                \
        uint64_t d = (darg);                    \
        ((uint8_t*)(p))[0] = (d);               \
        ((uint8_t*)(p))[1] = (d)>>8;            \
        ((uint8_t*)(p))[2] = (d)>>16;           \
        ((uint8_t*)(p))[3] = (d)>>24;           \
        ((uint8_t*)(p))[4] = (d)>>32;           \
        ((uint8_t*)(p))[5] = (d)>>40;           \
    } while(0)
#endif

/*
 * The AV_[RW]NA macros access naturally aligned data
 * in a type-safe way.
 */

/* Aligned native-order access through the av_alias## unions, legal under
 * strict aliasing thanks to the may_alias attribute on those types.
 * The pointer must be naturally aligned for s bits. */
#define AV_RNA(s, p)    (((const av_alias##s*)(p))->u##s)
#define AV_WNA(s, p, v) (((av_alias##s*)(p))->u##s = (v))

#ifndef AV_RN16A
#   define AV_RN16A(p) AV_RNA(16, p)
#endif

#ifndef AV_RN32A
#   define AV_RN32A(p) AV_RNA(32, p)
#endif

#ifndef AV_RN64A
#   define AV_RN64A(p) AV_RNA(64, p)
#endif

#ifndef AV_WN16A
#   define AV_WN16A(p, v) AV_WNA(16, p, v)
#endif

#ifndef AV_WN32A
#   define AV_WN32A(p, v) AV_WNA(32, p, v)
#endif

#ifndef AV_WN64A
#   define AV_WN64A(p, v) AV_WNA(64, p, v)
#endif

/* Aligned little-endian access: native on LE hosts, byte-swapped on BE. */
#if AV_HAVE_BIGENDIAN
#   define AV_RLA(s, p)    av_bswap##s(AV_RN##s##A(p))
#   define AV_WLA(s, p, v) AV_WN##s##A(p, av_bswap##s(v))
#else
#   define AV_RLA(s, p)    AV_RN##s##A(p)
#   define AV_WLA(s, p, v) AV_WN##s##A(p, v)
#endif

#ifndef AV_RL64A
#   define AV_RL64A(p) AV_RLA(64, p)
#endif
#ifndef AV_WL64A
#   define AV_WL64A(p, v) AV_WLA(64, p, v)
#endif

/*
 * The AV_COPYxxU macros are suitable for copying data to/from unaligned
 * memory locations.
 */

/* Unaligned n-bit copy, implemented as an unaligned read + write. */
#define AV_COPYU(n, d, s) AV_WN##n(d, AV_RN##n(s));

#ifndef AV_COPY16U
#   define AV_COPY16U(d, s) AV_COPYU(16, d, s)
#endif

#ifndef AV_COPY32U
#   define AV_COPY32U(d, s) AV_COPYU(32, d, s)
#endif

#ifndef AV_COPY64U
#   define AV_COPY64U(d, s) AV_COPYU(64, d, s)
#endif

#ifndef AV_COPY128U
#   define AV_COPY128U(d, s)                                  \
    do {                                                      \
        AV_COPY64U(d, s);                                     \
        AV_COPY64U((char*)(d)+8, (const char*)(s)+8);         \
    } while(0)
#endif

/* Parameters for AV_COPY*, AV_SWAP*, AV_ZERO* must be
 * naturally aligned.
 */

/* Aligned n-bit copy through the aliasing unions. */
#define AV_COPY(n, d, s) \
    (((av_alias##n*)(d))->u##n = ((const av_alias##n*)(s))->u##n)

#ifndef AV_COPY16
#   define AV_COPY16(d, s) AV_COPY(16, d, s)
#endif

#ifndef AV_COPY32
#   define AV_COPY32(d, s) AV_COPY(32, d, s)
#endif

#ifndef AV_COPY64
#   define AV_COPY64(d, s) AV_COPY(64, d, s)
#endif

#ifndef AV_COPY128
#   define AV_COPY128(d, s)                    \
    do {                                       \
        AV_COPY64(d, s);                       \
        AV_COPY64((char*)(d)+8, (char*)(s)+8); \
    } while(0)
#endif

/* Swap two aligned n-bit values.
 * NOTE(review): FFSWAP comes from libavutil/macros.h, which is not
 * included here — confirm it is in scope at every use site. */
#define AV_SWAP(n, a, b) FFSWAP(av_alias##n, *(av_alias##n*)(a), *(av_alias##n*)(b))

#ifndef AV_SWAP64
#   define AV_SWAP64(a, b) AV_SWAP(64, a, b)
#endif

/* Zero an aligned n-bit region. */
#define AV_ZERO(n, d) (((av_alias##n*)(d))->u##n = 0)

#ifndef AV_ZERO16
#   define AV_ZERO16(d) AV_ZERO(16, d)
#endif

#ifndef AV_ZERO32
#   define AV_ZERO32(d) AV_ZERO(32, d)
#endif

#ifndef AV_ZERO64
#   define AV_ZERO64(d) AV_ZERO(64, d)
#endif

#ifndef AV_ZERO128
#   define AV_ZERO128(d)         \
    do {                         \
        AV_ZERO64(d);            \
        AV_ZERO64((char*)(d)+8); \
    } while(0)
#endif

#endif /* AVUTIL_INTREADWRITE_H */