/* linux/arch/x86/include/asm/percpu.h */

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PERCPU_H
#define _ASM_X86_PERCPU_H

#ifdef CONFIG_X86_64
#define __percpu_seg
#define __percpu_rel
#else
#define __percpu_seg
#define __percpu_rel
#endif

#ifdef __ASSEMBLY__

#ifdef CONFIG_SMP
#define __percpu
#else
#define __percpu
#endif

#define PER_CPU_VAR(var)

#ifdef CONFIG_X86_64_SMP
#define INIT_PER_CPU_VAR(var)
#else
#define INIT_PER_CPU_VAR
#endif

#else /* !__ASSEMBLY__: */

#include <linux/build_bug.h>
#include <linux/stringify.h>
#include <asm/asm.h>

#ifdef CONFIG_SMP

#ifdef CONFIG_CC_HAS_NAMED_AS

#ifdef __CHECKER__
#define __seg_gs
#define __seg_fs
#endif

#ifdef CONFIG_X86_64
#define __percpu_seg_override
#else
#define __percpu_seg_override
#endif

#define __percpu_prefix

#else /* !CONFIG_CC_HAS_NAMED_AS: */

#define __percpu_seg_override
#define __percpu_prefix

#endif /* CONFIG_CC_HAS_NAMED_AS */

#define __force_percpu_prefix
#define __my_cpu_offset

/*
 * Compared to the generic __my_cpu_offset version, the following
 * saves one instruction and avoids clobbering a temp register.
 *
 * arch_raw_cpu_ptr should not be used in 32-bit VDSO for a 64-bit
 * kernel, because games are played with CONFIG_X86_64 there and
 * sizeof(this_cpu_off) becomes 4.
 */
#ifndef BUILD_VDSO32_64
#define arch_raw_cpu_ptr
#else
#define arch_raw_cpu_ptr
#endif
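
/*
 * A minimal sketch of the idea (illustrative only, not necessarily the
 * exact kernel definition; the temporary name tcp_ptr__ is made up):
 * the current CPU's per-CPU offset lives in this_cpu_off, so the pointer
 * can be formed with a single segment-prefixed ADD, e.g. on 64-bit:
 *
 *	#define arch_raw_cpu_ptr(ptr)					\
 *	({								\
 *		unsigned long tcp_ptr__;				\
 *		asm("add %%gs:%1, %0"					\
 *		    : "=r" (tcp_ptr__)					\
 *		    : "m" (this_cpu_off), "0" (ptr));			\
 *		(typeof(*(ptr)) __force *)tcp_ptr__;			\
 *	})
 *
 * The generic version first loads the offset into a temporary register
 * and then adds it with a second instruction.
 */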

#define PER_CPU_VAR

#else /* !CONFIG_SMP: */

#define __percpu_seg_override
#define __percpu_prefix
#define __force_percpu_prefix

#define PER_CPU_VAR

#endif /* CONFIG_SMP */

#define __my_cpu_type
#define __my_cpu_ptr
#define __my_cpu_var
#define __percpu_arg
#define __force_percpu_arg

/*
 * Initialized pointers to per-CPU variables needed for the boot
 * processor must use these macros to get the proper address
 * offset from __per_cpu_load on SMP.
 *
 * There must also be a matching entry in vmlinux_64.lds.S.
 */
#define DECLARE_INIT_PER_CPU

#ifdef CONFIG_X86_64_SMP
#define init_per_cpu_var
#else
#define init_per_cpu_var
#endif
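
/*
 * A sketch of the assumed shape (illustrative, not verified against the
 * current linker script):
 *
 *	#define DECLARE_INIT_PER_CPU(var) \
 *		extern typeof(var) init_per_cpu_var(var)
 *
 *	#ifdef CONFIG_X86_64_SMP
 *	#define init_per_cpu_var(var)	init_per_cpu__##var
 *	#else
 *	#define init_per_cpu_var(var)	var
 *	#endif
 *
 * i.e. on 64-bit SMP the boot-time copy is reached through a separate
 * init_per_cpu__##var symbol, while UP builds simply alias the variable.
 */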

/*
 * For arch-specific code, we can use direct single-insn ops (they
 * don't give an lvalue though).
 */

#define __pcpu_type_1
#define __pcpu_type_2
#define __pcpu_type_4
#define __pcpu_type_8

#define __pcpu_cast_1
#define __pcpu_cast_2
#define __pcpu_cast_4
#define __pcpu_cast_8

#define __pcpu_op1_1
#define __pcpu_op1_2
#define __pcpu_op1_4
#define __pcpu_op1_8

#define __pcpu_op2_1
#define __pcpu_op2_2
#define __pcpu_op2_4
#define __pcpu_op2_8

#define __pcpu_reg_1
#define __pcpu_reg_2
#define __pcpu_reg_4
#define __pcpu_reg_8

#define __pcpu_reg_imm_1
#define __pcpu_reg_imm_2
#define __pcpu_reg_imm_4
#define __pcpu_reg_imm_8
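
/*
 * The _1/_2/_4/_8 suffix selects the operand width.  As an illustration
 * (not the exact definitions), the byte-sized helpers could look like:
 *
 *	#define __pcpu_type_1			u8
 *	#define __pcpu_cast_1(val)		((u8)((unsigned long)(val) & 0xff))
 *	#define __pcpu_op1_1(op, dst)		op "b " dst
 *	#define __pcpu_op2_1(op, src, dst)	op "b " src ", " dst
 *	#define __pcpu_reg_1(mod, x)		mod "q" (x)
 *
 * i.e. a C type, a truncating cast, asm templates carrying the matching
 * size suffix, and a register constraint usable for that operand width.
 */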

#ifdef CONFIG_USE_X86_SEG_SUPPORT

#define __raw_cpu_read

#define __raw_cpu_write

#define __raw_cpu_read_const

#else /* !CONFIG_USE_X86_SEG_SUPPORT: */

#define __raw_cpu_read

#define __raw_cpu_write

/*
 * The generic per-CPU infrastructure is not suitable for
 * reading const-qualified variables.
 */
#define __raw_cpu_read_const

#endif /* CONFIG_USE_X86_SEG_SUPPORT */
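
/*
 * Roughly (illustrative sketch): with named address space support a
 * per-CPU read is a plain dereference through a __seg_gs/__seg_fs
 * qualified pointer, which the compiler lowers to a segment-relative
 * load and which also works for const-qualified variables:
 *
 *	(*(qual __my_cpu_type(pcp) *)__my_cpu_ptr(&(pcp)))
 *
 * Without named address spaces the reads go through the asm-based
 * accessors, which (per the comment above) do not support const-qualified
 * variables, hence the separate __raw_cpu_read_const().
 */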

#define __raw_cpu_read_stable

#define percpu_unary_op

#define percpu_binary_op

/*
 * Generate a per-CPU add-to-memory instruction, and optimize the
 * generated code when the constant 1 is added or subtracted.
 */
#define percpu_add_op
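
/*
 * A simplified sketch of the 4-byte case (illustrative; the real macro
 * covers all operand sizes):
 *
 *	if (__builtin_constant_p(val) && (val) == 1)
 *		percpu_unary_op(4, qual, "inc", var);
 *	else if (__builtin_constant_p(val) && (val) == -1)
 *		percpu_unary_op(4, qual, "dec", var);
 *	else
 *		percpu_binary_op(4, qual, "add", var, val);
 */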

/*
 * Add return operation
 */
#define percpu_add_return_op

/*
 * raw_cpu_xchg() can use a plain load/store sequence since
 * it is not required to be IRQ-safe.
 */
#define raw_percpu_xchg_op
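
/*
 * E.g. (illustrative sketch; pxo_old__ is just a local name):
 *
 *	#define raw_percpu_xchg_op(_var, _nval)				\
 *	({								\
 *		typeof(_var) pxo_old__ = raw_cpu_read(_var);		\
 *		raw_cpu_write(_var, _nval);				\
 *		pxo_old__;						\
 *	})
 */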

/*
 * this_cpu_xchg() is implemented using CMPXCHG without a LOCK prefix.
 * XCHG is expensive due to the implied LOCK prefix. The processor
 * cannot prefetch cachelines if XCHG is used.
 */
#define this_percpu_xchg_op
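
/*
 * Sketch of the idea (illustrative): a local CMPXCHG loop, retried until
 * the compare succeeds, stands in for the XCHG:
 *
 *	#define this_percpu_xchg_op(_var, _nval)			\
 *	({								\
 *		typeof(_var) pxo_old__ = this_cpu_read(_var);		\
 *		do { } while (!this_cpu_try_cmpxchg(_var, &pxo_old__, _nval)); \
 *		pxo_old__;						\
 *	})
 */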

/*
 * CMPXCHG has no such implied LOCK semantics; as a result it is much
 * more efficient for CPU-local operations.
 */
#define percpu_cmpxchg_op

#define percpu_try_cmpxchg_op
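
/*
 * Simplified sketch of the non-LOCKed compare-and-exchange (4-byte case,
 * illustrative operand names):
 *
 *	asm volatile("cmpxchgl %[nval], " __percpu_arg([var])
 *		     : [oval] "+a" (pco_old__),
 *		       [var] "+m" (__my_cpu_var(_var))
 *		     : [nval] "r" (pco_new__)
 *		     : "memory");
 *
 * The accumulator carries the expected value in and the previous value
 * out; the _try_ variant additionally returns the ZF result of the
 * comparison to the caller.
 */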

#if defined(CONFIG_X86_32) && !defined(CONFIG_UML)

#define percpu_cmpxchg64_op

#define raw_cpu_cmpxchg64
#define this_cpu_cmpxchg64

#define percpu_try_cmpxchg64_op

#define raw_cpu_try_cmpxchg64
#define this_cpu_try_cmpxchg64

#endif /* defined(CONFIG_X86_32) && !defined(CONFIG_UML) */

#ifdef CONFIG_X86_64
#define raw_cpu_cmpxchg64
#define this_cpu_cmpxchg64

#define raw_cpu_try_cmpxchg64
#define this_cpu_try_cmpxchg64

#define percpu_cmpxchg128_op

#define raw_cpu_cmpxchg128
#define this_cpu_cmpxchg128

#define percpu_try_cmpxchg128_op

#define raw_cpu_try_cmpxchg128
#define this_cpu_try_cmpxchg128

#endif /* CONFIG_X86_64 */

#define raw_cpu_read_1
#define raw_cpu_read_2
#define raw_cpu_read_4
#define raw_cpu_write_1
#define raw_cpu_write_2
#define raw_cpu_write_4

#define this_cpu_read_1
#define this_cpu_read_2
#define this_cpu_read_4
#define this_cpu_write_1
#define this_cpu_write_2
#define this_cpu_write_4

#define this_cpu_read_stable_1
#define this_cpu_read_stable_2
#define this_cpu_read_stable_4

#define raw_cpu_add_1
#define raw_cpu_add_2
#define raw_cpu_add_4
#define raw_cpu_and_1
#define raw_cpu_and_2
#define raw_cpu_and_4
#define raw_cpu_or_1
#define raw_cpu_or_2
#define raw_cpu_or_4
#define raw_cpu_xchg_1
#define raw_cpu_xchg_2
#define raw_cpu_xchg_4

#define this_cpu_add_1
#define this_cpu_add_2
#define this_cpu_add_4
#define this_cpu_and_1
#define this_cpu_and_2
#define this_cpu_and_4
#define this_cpu_or_1
#define this_cpu_or_2
#define this_cpu_or_4
#define this_cpu_xchg_1
#define this_cpu_xchg_2
#define this_cpu_xchg_4

#define raw_cpu_add_return_1
#define raw_cpu_add_return_2
#define raw_cpu_add_return_4
#define raw_cpu_cmpxchg_1
#define raw_cpu_cmpxchg_2
#define raw_cpu_cmpxchg_4
#define raw_cpu_try_cmpxchg_1
#define raw_cpu_try_cmpxchg_2
#define raw_cpu_try_cmpxchg_4

#define this_cpu_add_return_1
#define this_cpu_add_return_2
#define this_cpu_add_return_4
#define this_cpu_cmpxchg_1
#define this_cpu_cmpxchg_2
#define this_cpu_cmpxchg_4
#define this_cpu_try_cmpxchg_1
#define this_cpu_try_cmpxchg_2
#define this_cpu_try_cmpxchg_4

/*
 * Per-CPU atomic 64-bit operations are only available on 64-bit kernels.
 * 32-bit kernels must fall back to generic operations.
 */
#ifdef CONFIG_X86_64

#define raw_cpu_read_8
#define raw_cpu_write_8

#define this_cpu_read_8
#define this_cpu_write_8

#define this_cpu_read_stable_8

#define raw_cpu_add_8
#define raw_cpu_and_8
#define raw_cpu_or_8
#define raw_cpu_add_return_8
#define raw_cpu_xchg_8
#define raw_cpu_cmpxchg_8
#define raw_cpu_try_cmpxchg_8

#define this_cpu_add_8
#define this_cpu_and_8
#define this_cpu_or_8
#define this_cpu_add_return_8
#define this_cpu_xchg_8
#define this_cpu_cmpxchg_8
#define this_cpu_try_cmpxchg_8

#define raw_cpu_read_long

#else /* !CONFIG_X86_64: */

/* There is no generic 64-bit read stable operation for 32-bit targets. */
#define this_cpu_read_stable_8

#define raw_cpu_read_long

#endif /* CONFIG_X86_64 */

#define this_cpu_read_const

/*
 * this_cpu_read() makes the compiler load the per-CPU variable every time
 * it is accessed, while this_cpu_read_stable() allows the value to be cached.
 * this_cpu_read_stable() is more efficient and can be used if the referenced
 * value is guaranteed to be valid across CPUs.  The current users include
 * pcpu_hot.current_task and pcpu_hot.top_of_stack, both of which are
 * actually per-thread variables implemented as per-CPU variables and
 * thus stable for the duration of the respective task.
 */
#define this_cpu_read_stable
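
/*
 * Example usage (illustrative):
 *
 *	struct task_struct *t = this_cpu_read_stable(pcpu_hot.current_task);
 *
 * The compiler may cache and reuse the loaded value, which is safe here
 * because the value is stable for the duration of the current task.
 */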

#define x86_this_cpu_constant_test_bit

#define x86_this_cpu_variable_test_bit

#define x86_this_cpu_test_bit


#include <asm-generic/percpu.h>

/* We can use this directly for the local CPU (faster). */
DECLARE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off);

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_SMP

/*
 * Define the "EARLY_PER_CPU" macros.  These are used for some per_cpu
 * variables that are initialized and accessed before the per_cpu areas
 * are allocated.
 */

#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)

#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue)

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)

#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name)

#define early_per_cpu_ptr(_name)
#define early_per_cpu_map(_name, _idx)

#define early_per_cpu(_name, _cpu)
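
/*
 * Sketch of the assumed shape (illustrative; the _early_map/_early_ptr
 * names are assumptions):
 *
 *	#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)		\
 *		DEFINE_PER_CPU(_type, _name) = _initvalue;		\
 *		__typeof__(_type) _name##_early_map[NR_CPUS] __initdata = \
 *			{ [0 ... NR_CPUS-1] = _initvalue };		\
 *		__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map
 *
 *	#define early_per_cpu(_name, _cpu)				\
 *		*(early_per_cpu_ptr(_name) ?				\
 *			&early_per_cpu_ptr(_name)[_cpu] :		\
 *			&per_cpu(_name, _cpu))
 *
 * i.e. early users go through a static boot-time array until the real
 * per-CPU areas exist, after which the _early_ptr is cleared and normal
 * per_cpu() access takes over.
 */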

#else /* !CONFIG_SMP: */
#define DEFINE_EARLY_PER_CPU

#define DEFINE_EARLY_PER_CPU_READ_MOSTLY

#define EXPORT_EARLY_PER_CPU_SYMBOL

#define DECLARE_EARLY_PER_CPU

#define DECLARE_EARLY_PER_CPU_READ_MOSTLY

#define early_per_cpu
#define early_per_cpu_ptr
/* no early_per_cpu_map() */

#endif /* !CONFIG_SMP */

#endif /* _ASM_X86_PERCPU_H */