linux/arch/x86/include/asm/atomic.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_ATOMIC_H
#define _ASM_X86_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>
#include <asm/rmwcc.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */
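/*
 * Illustrative usage (a sketch, not part of this header): kernel code
 * normally goes through the generic <linux/atomic.h> wrappers, which map
 * onto the arch_atomic_*() operations defined below, e.g.:
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *	atomic_inc(&nr_users);
 *	...
 *	if (atomic_dec_and_test(&nr_users))
 *		cleanup();		// hypothetical helper
 */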

static __always_inline int arch_atomic_read(const atomic_t *v)
{
	/*
	 * Note for KASAN: we deliberately don't use READ_ONCE_NOCHECK() here,
	 * it's non-inlined function that increases binary size and stack usage.
	 */
	return __READ_ONCE((v)->counter);
}

static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
	__WRITE_ONCE(v->counter, i);
}

static __always_inline void arch_atomic_add(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "addl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i) : "memory");
}

static __always_inline void arch_atomic_sub(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "subl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i) : "memory");
}

/* Atomically subtract @i from @v; returns true iff the result is zero. */
static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, e, "er", i);
}
/* Tell the generic <linux/atomic.h> machinery this op is provided here. */
#define arch_atomic_sub_and_test arch_atomic_sub_and_test

static __always_inline void arch_atomic_inc(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "incl %0"
		     : "+m" (v->counter) :: "memory");
}
#define arch_atomic_inc arch_atomic_inc

static __always_inline void arch_atomic_dec(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "decl %0"
		     : "+m" (v->counter) :: "memory");
}
#define arch_atomic_dec arch_atomic_dec

/* Atomically decrement @v; returns true iff the result is zero. */
static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
{
	return GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, e);
}
#define arch_atomic_dec_and_test arch_atomic_dec_and_test

/* Atomically increment @v; returns true iff the result is zero. */
static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
{
	return GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, e);
}
#define arch_atomic_inc_and_test arch_atomic_inc_and_test

/* Atomically add @i to @v; returns true iff the result is negative. */
static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, s, "er", i);
}
#define arch_atomic_add_negative arch_atomic_add_negative

static __always_inline int arch_atomic_add_return(int i, atomic_t *v)
{
	return i + xadd(&v->counter, i);
}
#define arch_atomic_add_return arch_atomic_add_return

#define arch_atomic_sub_return(i, v)	arch_atomic_add_return(-(i), v)

static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
{
	return xadd(&v->counter, i);
}
#define arch_atomic_fetch_add arch_atomic_fetch_add

#define arch_atomic_fetch_sub(i, v)	arch_atomic_fetch_add(-(i), v)

static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
	return arch_cmpxchg(&v->counter, old, new);
}
#define arch_atomic_cmpxchg arch_atomic_cmpxchg

static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
	return arch_try_cmpxchg(&v->counter, old, new);
}
#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg

static __always_inline int arch_atomic_xchg(atomic_t *v, int new)
{
	return arch_xchg(&v->counter, new);
}
#define arch_atomic_xchg arch_atomic_xchg

static __always_inline void arch_atomic_and(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "andl %1,%0"
			: "+m" (v->counter)
			: "ir" (i)
			: "memory");
}

/*
 * No single x86 instruction returns the old value for and/or/xor, so the
 * fetch variants fall back to a try_cmpxchg() loop.
 */
static __always_inline int arch_atomic_fetch_and(int i, atomic_t *v)
{
	int val = arch_atomic_read(v);

	do { } while (!arch_atomic_try_cmpxchg(v, &val, val & i));

	return val;
}
#define arch_atomic_fetch_and arch_atomic_fetch_and

static __always_inline void arch_atomic_or(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "orl %1,%0"
			: "+m" (v->counter)
			: "ir" (i)
			: "memory");
}

static __always_inline int arch_atomic_fetch_or(int i, atomic_t *v)
{
	int val = arch_atomic_read(v);

	do { } while (!arch_atomic_try_cmpxchg(v, &val, val | i));

	return val;
}
#define arch_atomic_fetch_or arch_atomic_fetch_or

static __always_inline void arch_atomic_xor(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "xorl %1,%0"
			: "+m" (v->counter)
			: "ir" (i)
			: "memory");
}

static __always_inline int arch_atomic_fetch_xor(int i, atomic_t *v)
{
	int val = arch_atomic_read(v);

	do { } while (!arch_atomic_try_cmpxchg(v, &val, val ^ i));

	return val;
}
#define arch_atomic_fetch_xor arch_atomic_fetch_xor

#ifdef CONFIG_X86_32
# include <asm/atomic64_32.h>
#else
# include <asm/atomic64_64.h>
#endif

#endif /* _ASM_X86_ATOMIC_H */