/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_LOCAL_H
#define _ASM_X86_LOCAL_H

#include <linux/percpu.h>
#include <linux/atomic.h>
#include <asm/asm.h>

/*
 * NOTE(review): the type definition is truncated in this copy — only the
 * trailing "local_t;" survives.  Presumably this was a
 * "typedef struct { ... } local_t;" wrapping an atomic counter; confirm
 * against the upstream header before relying on its layout.
 */
local_t;

/*
 * NOTE(review): every macro body and function body below is elided ("…")
 * in this copy.  The comments describe the contract implied by the
 * surviving kernel-doc and by the names' kernel-wide convention, not the
 * (missing) implementations — verify against upstream.
 */

/* Static initializer for a local_t holding value @i. */
#define LOCAL_INIT(i) …

/* Read the current value of local_t @l. */
#define local_read(l) …

/* Set local_t @l to value @i. */
#define local_set(l, i) …

/**
 * local_inc - increment variable
 * @l: pointer to type local_t
 *
 * Atomically increments @l by 1 (CPU-local atomicity; see the
 * *_and_test kernel-doc below for the family's semantics).
 */
static inline void local_inc(local_t *l) { … }

/**
 * local_dec - decrement variable
 * @l: pointer to type local_t
 *
 * Atomically decrements @l by 1.
 */
static inline void local_dec(local_t *l) { … }

/**
 * local_add - add value to variable
 * @i: integer value to add
 * @l: pointer to type local_t
 *
 * Atomically adds @i to @l.
 */
static inline void local_add(long i, local_t *l) { … }

/**
 * local_sub - subtract value from variable
 * @i: integer value to subtract
 * @l: pointer to type local_t
 *
 * Atomically subtracts @i from @l.
 */
static inline void local_sub(long i, local_t *l) { … }

/**
 * local_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @l: pointer to type local_t
 *
 * Atomically subtracts @i from @l and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static inline bool local_sub_and_test(long i, local_t *l) { … }

/**
 * local_dec_and_test - decrement and test
 * @l: pointer to type local_t
 *
 * Atomically decrements @l by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static inline bool local_dec_and_test(local_t *l) { … }

/**
 * local_inc_and_test - increment and test
 * @l: pointer to type local_t
 *
 * Atomically increments @l by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static inline bool local_inc_and_test(local_t *l) { … }

/**
 * local_add_negative - add and test if negative
 * @i: integer value to add
 * @l: pointer to type local_t
 *
 * Atomically adds @i to @l and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static inline bool local_add_negative(long i, local_t *l) { … }

/**
 * local_add_return - add and return
 * @i: integer value to add
 * @l: pointer to type local_t
 *
 * Atomically adds @i to @l and returns @i + @l
 */
static inline long local_add_return(long i, local_t *l) { … }

/**
 * local_sub_return - subtract and return
 * @i: integer value to subtract
 * @l: pointer to type local_t
 *
 * Counterpart of local_add_return(): atomically subtracts @i from @l
 * and returns the resulting value.
 */
static inline long local_sub_return(long i, local_t *l) { … }

/* Increment @l by 1 and return the new value. */
#define local_inc_return(l) …
/* Decrement @l by 1 and return the new value. */
#define local_dec_return(l) …

/**
 * local_cmpxchg - compare and exchange
 * @l: pointer to type local_t
 * @old: expected old value
 * @new: value to store if @l currently holds @old
 *
 * Presumably follows the usual cmpxchg convention (returns the value
 * observed in @l; the store happened iff that equals @old) — confirm
 * against upstream, since the body is elided here.
 */
static inline long local_cmpxchg(local_t *l, long old, long new) { … }

/**
 * local_try_cmpxchg - compare and exchange, reporting success
 * @l: pointer to type local_t
 * @old: pointer to the expected old value; by try_cmpxchg convention
 *       presumably updated with the observed value on failure — confirm
 *       against upstream
 * @new: value to store on success
 *
 * Returns true if the exchange took place.
 */
static inline bool local_try_cmpxchg(local_t *l, long *old, long new) { … }

/*
 * Implement local_xchg using CMPXCHG instruction without the LOCK prefix.
 * XCHG is expensive due to the implied LOCK prefix. The processor
 * cannot prefetch cachelines if XCHG is used.
 */
static __always_inline long local_xchg(local_t *l, long n) { … }

/**
 * local_add_unless - add unless the number is already a given value
 * @l: pointer of type local_t
 * @a: the amount to add to l...
 * @u: ...unless l is equal to u.
 *
 * Atomically adds @a to @l, if @l was not already @u.
 * Returns true if the addition was done.
 */
static __always_inline bool local_add_unless(local_t *l, long a, long u) { … }

/* Increment @l unless it is zero; true if the increment was done. */
#define local_inc_not_zero(l) …

/* On x86_32, these are no better than the atomic variants.
 * On x86-64 these are better than the atomic variants on SMP kernels
 * because they don't use a lock prefix.
 */
#define __local_inc(l) …
#define __local_dec(l) …
#define __local_add(i, l) …
#define __local_sub(i, l) …

#endif /* _ASM_X86_LOCAL_H */