linux/arch/x86/include/asm/cmpxchg.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASM_X86_CMPXCHG_H
#define ASM_X86_CMPXCHG_H

#include <linux/compiler.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h> /* Provides LOCK_PREFIX */

/*
 * Non-existent functions to indicate usage errors at link time
 * (or compile-time if the compiler implements __compiletime_error()).
 */
extern void __xchg_wrong_size(void)
	__compiletime_error("Bad argument size for xchg");
extern void __cmpxchg_wrong_size(void)
	__compiletime_error("Bad argument size for cmpxchg");
extern void __xadd_wrong_size(void)
	__compiletime_error("Bad argument size for xadd");
extern void __add_wrong_size(void)
	__compiletime_error("Bad argument size for add");

/*
 * Constants for operation sizes. On 32-bit, the 64-bit size is set to
 * -1 because sizeof will never return -1, thereby making those switch
 * case statements guaranteed dead code which the compiler will
 * eliminate, and allowing the "missing symbol in the default case" to
 * indicate a usage error.
 */
#define __X86_CASE_B	1
#define __X86_CASE_W	2
#define __X86_CASE_L	4
#ifdef CONFIG_64BIT
#define __X86_CASE_Q	8
#else
#define __X86_CASE_Q	-1		/* sizeof will never return -1 */
#endif

/*
 * An exchange-type operation, which takes a value and a pointer, and
 * returns the old value.
 */
#define __xchg_op(ptr, arg, op, lock)					\
	({								\
		__typeof__ (*(ptr)) __ret = (arg);			\
		switch (sizeof(*(ptr))) {				\
		case __X86_CASE_B:					\
			asm volatile (lock #op "b %b0, %1\n"		\
				      : "+q" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		case __X86_CASE_W:					\
			asm volatile (lock #op "w %w0, %1\n"		\
				      : "+r" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		case __X86_CASE_L:					\
			asm volatile (lock #op "l %0, %1\n"		\
				      : "+r" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		case __X86_CASE_Q:					\
			asm volatile (lock #op "q %q0, %1\n"		\
				      : "+r" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		default:						\
			__ ## op ## _wrong_size();			\
		}							\
		__ret;							\
	})

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Since this is generally used to protect other memory information, we
 * use "asm volatile" and "memory" clobbers to prevent gcc from moving
 * information around.
 */
#define arch_xchg(ptr, v)	__xchg_op((ptr), (v), xchg, "")
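
/*
 * Usage sketch, not from the original header (lock_word is a
 * hypothetical variable): unconditionally swap in a new value and
 * examine what was there before, e.g. when releasing a test-and-set
 * lock:
 *
 *	unsigned long prev = arch_xchg(&lock_word, 0UL);
 *	WARN_ON(!prev);		(the lock should have been held)
 */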

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */
#define __raw_cmpxchg(ptr, old, new, size, lock)			\
({									\
	__typeof__(*(ptr)) __ret;					\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	switch (size) {							\
	case __X86_CASE_B:						\
	{								\
		volatile u8 *__ptr = (volatile u8 *)(ptr);		\
		asm volatile(lock "cmpxchgb %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "q" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_W:						\
	{								\
		volatile u16 *__ptr = (volatile u16 *)(ptr);		\
		asm volatile(lock "cmpxchgw %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_L:						\
	{								\
		volatile u32 *__ptr = (volatile u32 *)(ptr);		\
		asm volatile(lock "cmpxchgl %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_Q:						\
	{								\
		volatile u64 *__ptr = (volatile u64 *)(ptr);		\
		asm volatile(lock "cmpxchgq %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	__ret;								\
})

#define __cmpxchg(ptr, old, new, size)					\
	__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)

#define __sync_cmpxchg(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), "lock; ")

#define __cmpxchg_local(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), "")

#ifdef CONFIG_X86_32
# include <asm/cmpxchg_32.h>
#else
# include <asm/cmpxchg_64.h>
#endif

#define arch_cmpxchg(ptr, old, new)					\
	__cmpxchg(ptr, old, new, sizeof(*(ptr)))

#define arch_sync_cmpxchg(ptr, old, new)				\
	__sync_cmpxchg(ptr, old, new, sizeof(*(ptr)))

#define arch_cmpxchg_local(ptr, old, new)				\
	__cmpxchg_local(ptr, old, new, sizeof(*(ptr)))
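
/*
 * Usage sketch, not from the original header (counter is a
 * hypothetical variable): the classical compare-and-swap loop built
 * on arch_cmpxchg(). Success is detected by comparing the returned
 * (previous) value with the expected one:
 *
 *	unsigned long old, new;
 *	do {
 *		old = READ_ONCE(counter);
 *		new = old + 1;
 *	} while (arch_cmpxchg(&counter, old, new) != old);
 */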


#define __raw_try_cmpxchg(_ptr, _pold, _new, size, lock)		\
({									\
	bool success;							\
	__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);		\
	__typeof__(*(_ptr)) __old = *_old;				\
	__typeof__(*(_ptr)) __new = (_new);				\
	switch (size) {							\
	case __X86_CASE_B:						\
	{								\
		volatile u8 *__ptr = (volatile u8 *)(_ptr);		\
		asm volatile(lock "cmpxchgb %[new], %[ptr]"		\
			     CC_SET(z)					\
			     : CC_OUT(z) (success),			\
			       [ptr] "+m" (*__ptr),			\
			       [old] "+a" (__old)			\
			     : [new] "q" (__new)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_W:						\
	{								\
		volatile u16 *__ptr = (volatile u16 *)(_ptr);		\
		asm volatile(lock "cmpxchgw %[new], %[ptr]"		\
			     CC_SET(z)					\
			     : CC_OUT(z) (success),			\
			       [ptr] "+m" (*__ptr),			\
			       [old] "+a" (__old)			\
			     : [new] "r" (__new)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_L:						\
	{								\
		volatile u32 *__ptr = (volatile u32 *)(_ptr);		\
		asm volatile(lock "cmpxchgl %[new], %[ptr]"		\
			     CC_SET(z)					\
			     : CC_OUT(z) (success),			\
			       [ptr] "+m" (*__ptr),			\
			       [old] "+a" (__old)			\
			     : [new] "r" (__new)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_Q:						\
	{								\
		volatile u64 *__ptr = (volatile u64 *)(_ptr);		\
		asm volatile(lock "cmpxchgq %[new], %[ptr]"		\
			     CC_SET(z)					\
			     : CC_OUT(z) (success),			\
			       [ptr] "+m" (*__ptr),			\
			       [old] "+a" (__old)			\
			     : [new] "r" (__new)			\
			     : "memory");				\
		break;							\
	}								\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	if (unlikely(!success))						\
		*_old = __old;						\
	likely(success);						\
})

#define __try_cmpxchg(ptr, pold, new, size)				\
	__raw_try_cmpxchg((ptr), (pold), (new), (size), LOCK_PREFIX)

#define __sync_try_cmpxchg(ptr, pold, new, size)			\
	__raw_try_cmpxchg((ptr), (pold), (new), (size), "lock; ")

#define __try_cmpxchg_local(ptr, pold, new, size)			\
	__raw_try_cmpxchg((ptr), (pold), (new), (size), "")

#define arch_try_cmpxchg(ptr, pold, new)				\
	__try_cmpxchg((ptr), (pold), (new), sizeof(*(ptr)))

#define arch_sync_try_cmpxchg(ptr, pold, new)				\
	__sync_try_cmpxchg((ptr), (pold), (new), sizeof(*(ptr)))

#define arch_try_cmpxchg_local(ptr, pold, new)				\
	__try_cmpxchg_local((ptr), (pold), (new), sizeof(*(ptr)))
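
/*
 * Usage sketch, not from the original header (counter is a
 * hypothetical variable): the try_cmpxchg() form returns a boolean
 * and, on failure, writes the value it actually found back through
 * the "pold" pointer, so the loop need not re-read memory on every
 * iteration:
 *
 *	unsigned long new, old = READ_ONCE(counter);
 *	do {
 *		new = old + 1;
 *	} while (!arch_try_cmpxchg(&counter, &old, new));
 */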

/*
 * xadd() adds "inc" to "*ptr" and atomically returns the previous
 * value of "*ptr".
 *
 * xadd() is locked when multiple CPUs are online.
 */
#define __xadd(ptr, inc, lock)	__xchg_op((ptr), (inc), xadd, lock)
#define xadd(ptr, inc)		__xadd((ptr), (inc), LOCK_PREFIX)
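
/*
 * Usage sketch, not from the original header (next_ticket is a
 * hypothetical variable): xadd() as an atomic fetch-and-add, e.g.
 * claiming the next ticket in a ticket lock:
 *
 *	unsigned short my_ticket = xadd(&next_ticket, 1);
 */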

#endif	/* ASM_X86_CMPXCHG_H */