linux/arch/x86/include/asm/percpu.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PERCPU_H
#define _ASM_X86_PERCPU_H

#ifdef CONFIG_X86_64
#define __percpu_seg
#define __percpu_rel
#else
#define __percpu_seg
#define __percpu_rel
#endif

#ifdef __ASSEMBLY__

#ifdef CONFIG_SMP
#define __percpu
#else
#define __percpu
#endif

#define PER_CPU_VAR

#ifdef CONFIG_X86_64_SMP
#define INIT_PER_CPU_VAR
#else
#define INIT_PER_CPU_VAR
#endif

#else /* !__ASSEMBLY__: */

#include <linux/build_bug.h>
#include <linux/stringify.h>
#include <asm/asm.h>

#ifdef CONFIG_SMP

#ifdef CONFIG_CC_HAS_NAMED_AS

#ifdef __CHECKER__
#define __seg_gs
#define __seg_fs
#endif

#ifdef CONFIG_X86_64
#define __percpu_seg_override
#else
#define __percpu_seg_override
#endif

#define __percpu_prefix

#else /* !CONFIG_CC_HAS_NAMED_AS: */

#define __percpu_seg_override
#define __percpu_prefix

#endif /* CONFIG_CC_HAS_NAMED_AS */

#define __force_percpu_prefix
#define __my_cpu_offset

/*
 * Compared to the generic __my_cpu_offset version, the following
 * saves one instruction and avoids clobbering a temp register.
 *
 * arch_raw_cpu_ptr should not be used in 32-bit VDSO for a 64-bit
 * kernel, because games are played with CONFIG_X86_64 there and
 * sizeof(this_cpu_off) becomes 4.
 */
#ifndef BUILD_VDSO32_64
#define arch_raw_cpu_ptr(_ptr)
#else
#define arch_raw_cpu_ptr
#endif
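
/*
 * Illustrative sketch only, not the definition elided above: because
 * this_cpu_off is itself reachable through the per-CPU segment prefix,
 * the pointer can be formed with a single segment-relative load plus an
 * add, roughly:
 *
 *	unsigned long tcp_ptr__ = raw_cpu_read(this_cpu_off);
 *
 *	tcp_ptr__ += (unsigned long)(_ptr);
 *	(typeof(*(_ptr)) __kernel __force *)tcp_ptr__;
 *
 * whereas the generic version goes through __per_cpu_offset[] indexed by
 * the CPU number, which costs the extra instruction and the temp-register
 * clobber mentioned above.  (tcp_ptr__ is just an illustrative name.)
 */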

#define PER_CPU_VAR(var)

#else /* !CONFIG_SMP: */

#define __percpu_seg_override
#define __percpu_prefix
#define __force_percpu_prefix

#define PER_CPU_VAR

#endif /* CONFIG_SMP */

#define __my_cpu_type(var)
#define __my_cpu_ptr(ptr)
#define __my_cpu_var(var)
#define __percpu_arg(x)
#define __force_percpu_arg(x)

/*
 * Initialized pointers to per-CPU variables needed for the boot
 * processor must use these macros to get the proper address
 * offset from __per_cpu_load on SMP.
 *
 * There must also be a corresponding entry in arch/x86/kernel/vmlinux.lds.S.
 */
#define DECLARE_INIT_PER_CPU(var)

#ifdef CONFIG_X86_64_SMP
#define init_per_cpu_var(var)
#else
#define init_per_cpu_var
#endif
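
/*
 * Hypothetical usage sketch (gdt_page is a real per-CPU example, but the
 * expansions described here are assumed rather than copied from the
 * elided definitions):
 *
 *	DECLARE_INIT_PER_CPU(gdt_page);
 *
 *	struct desc_struct *gdt = init_per_cpu_var(gdt_page).gdt;
 *
 * On CONFIG_X86_64_SMP, init_per_cpu_var(gdt_page) would refer to the
 * init_per_cpu__gdt_page symbol, i.e. the boot CPU's copy placed relative
 * to __per_cpu_load by the linker script; on UP it is simply the plain
 * per-CPU variable.
 */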

/*
 * For arch-specific code, we can use direct single-insn ops (they
 * don't give an lvalue though).
 */

#define __pcpu_type_1
#define __pcpu_type_2
#define __pcpu_type_4
#define __pcpu_type_8

#define __pcpu_cast_1(val)
#define __pcpu_cast_2(val)
#define __pcpu_cast_4(val)
#define __pcpu_cast_8(val)

#define __pcpu_op1_1(op, dst)
#define __pcpu_op1_2(op, dst)
#define __pcpu_op1_4(op, dst)
#define __pcpu_op1_8(op, dst)

#define __pcpu_op2_1(op, src, dst)
#define __pcpu_op2_2(op, src, dst)
#define __pcpu_op2_4(op, src, dst)
#define __pcpu_op2_8(op, src, dst)

#define __pcpu_reg_1(mod, x)
#define __pcpu_reg_2(mod, x)
#define __pcpu_reg_4(mod, x)
#define __pcpu_reg_8(mod, x)

#define __pcpu_reg_imm_1(x)
#define __pcpu_reg_imm_2(x)
#define __pcpu_reg_imm_4(x)
#define __pcpu_reg_imm_8(x)
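
/*
 * Sketch of how the size-suffixed helpers above typically compose (an
 * assumed pattern, not the elided definitions): the type/cast helpers
 * pick a scalar of the right width, the op helpers paste the b/w/l/q
 * instruction suffix, and the reg helpers pick a suitable register
 * constraint, so a single template can emit e.g. a one-operand op on a
 * 4-byte per-CPU variable as something like:
 *
 *	asm(__pcpu_op1_4("inc", __percpu_arg([var]))
 *	    : [var] "+m" (__my_cpu_var(pcp)));
 *
 * with __percpu_arg() supplying the %gs:/%fs: prefix (or nothing on UP).
 */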

#ifdef CONFIG_USE_X86_SEG_SUPPORT

#define __raw_cpu_read

#define __raw_cpu_write

#define __raw_cpu_read_const

#else /* !CONFIG_USE_X86_SEG_SUPPORT: */

#define __raw_cpu_read(size, qual, _var)

#define __raw_cpu_write(size, qual, _var, _val)

/*
 * The generic per-CPU infrastructure is not suitable for reading
 * const-qualified variables.
 */
#define __raw_cpu_read_const(pcp)

#endif /* CONFIG_USE_X86_SEG_SUPPORT */
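
/*
 * Rough sketch of the two read flavors (assumed shapes, not the elided
 * bodies): with compiler named-address-space support the load is a plain
 * dereference through a __seg_gs/__seg_fs qualified pointer, roughly
 *
 *	(*(qual __my_cpu_type(_var) *)__my_cpu_ptr(&(_var)))
 *
 * which the compiler is free to combine with other accesses, while the
 * asm fallback emits an explicit segment-prefixed MOV of the right width,
 * roughly
 *
 *	asm qual (__pcpu_op2_4("mov", __percpu_arg([var]), "%[val]")
 *	    : [val] __pcpu_reg_4("=", pfo_val__)
 *	    : [var] "m" (__my_cpu_var(_var)));
 *
 * (pfo_val__ is just an illustrative temporary; the 4-byte case is shown.)
 */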

#define __raw_cpu_read_stable(size, _var)

#define percpu_unary_op(size, qual, op, _var)

#define percpu_binary_op(size, qual, op, _var, _val)

/*
 * Generate a per-CPU add-to-memory instruction and optimize the code
 * when the constant 1 is added or subtracted, so that INC/DEC can be used.
 */
#define percpu_add_op(size, qual, var, val)
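
/*
 * Example of the intended optimization (illustrative output, not the
 * elided macro body): when the addend is the constant 1 or -1, the add
 * can be emitted as INC/DEC instead of ADD with an immediate, so for a
 * hypothetical per-CPU counter pcp_count:
 *
 *	this_cpu_add(pcp_count, 1);
 *
 * would ideally assemble to
 *
 *	incl %gs:pcp_count(%rip)
 *
 * on a 64-bit SMP build, rather than "addl $1, %gs:pcp_count(%rip)".
 */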

/*
 * Add return operation
 */
#define percpu_add_return_op(size, qual, _var, _val)
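
/*
 * Add-return is commonly built around XADD (a sketch of the assumed
 * shape, not the elided body): XADD adds the register to memory and
 * returns the old memory value in the register, so the new value is the
 * returned old value plus the addend:
 *
 *	typeof(_var) paro_tmp__ = _val;
 *
 *	asm qual (__pcpu_op2_4("xadd", "%[tmp]", __percpu_arg([var]))
 *	    : [tmp] __pcpu_reg_4("+", paro_tmp__),
 *	      [var] "+m" (__my_cpu_var(_var)));
 *	(typeof(_var))(paro_tmp__ + _val);
 *
 * (paro_tmp__ is just an illustrative name; the 4-byte variant is shown.)
 */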

/*
 * raw_cpu_xchg() can use a load-store since
 * it is not required to be IRQ-safe.
 */
#define raw_percpu_xchg_op(_var, _nval)
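
/*
 * Plausible shape of the raw variant (a sketch, not the elided body):
 * since it does not have to be IRQ-safe, a plain read followed by a
 * write is sufficient:
 *
 *	typeof(_var) pxo_old__ = raw_cpu_read(_var);
 *
 *	raw_cpu_write(_var, _nval);
 *	pxo_old__;
 */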

/*
 * this_cpu_xchg() is implemented using CMPXCHG, without a LOCK prefix.
 * XCHG is expensive due to its implied LOCK prefix: the processor
 * cannot prefetch cachelines if XCHG is used.
 */
#define this_percpu_xchg_op(_var, _nval)
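
/*
 * Sketch of the CMPXCHG loop described above (an assumed shape, not the
 * elided body): read the current value, then retry a non-LOCKed
 * compare-and-exchange until no other update slipped in between:
 *
 *	typeof(_var) pxo_old__ = this_cpu_read(_var);
 *
 *	do { } while (!this_cpu_try_cmpxchg(_var, &pxo_old__, _nval));
 *	pxo_old__;
 */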

/*
 * CMPXCHG has no such implied LOCK semantics; as a result it is much
 * more efficient for CPU-local operations.
 */
#define percpu_cmpxchg_op(size, qual, _var, _oval, _nval)

#define percpu_try_cmpxchg_op(size, qual, _var, _ovalp, _nval)

#if defined(CONFIG_X86_32) && !defined(CONFIG_UML)

#define percpu_cmpxchg64_op

#define raw_cpu_cmpxchg64
#define this_cpu_cmpxchg64

#define percpu_try_cmpxchg64_op

#define raw_cpu_try_cmpxchg64
#define this_cpu_try_cmpxchg64

#endif /* defined(CONFIG_X86_32) && !defined(CONFIG_UML) */

#ifdef CONFIG_X86_64
#define raw_cpu_cmpxchg64(pcp, oval, nval)
#define this_cpu_cmpxchg64(pcp, oval, nval)

#define raw_cpu_try_cmpxchg64(pcp, ovalp, nval)
#define this_cpu_try_cmpxchg64(pcp, ovalp, nval)

#define percpu_cmpxchg128_op(size, qual, _var, _oval, _nval)

#define raw_cpu_cmpxchg128(pcp, oval, nval)
#define this_cpu_cmpxchg128(pcp, oval, nval)

#define percpu_try_cmpxchg128_op(size, qual, _var, _ovalp, _nval)

#define raw_cpu_try_cmpxchg128(pcp, ovalp, nval)
#define this_cpu_try_cmpxchg128(pcp, ovalp, nval)

#endif /* CONFIG_X86_64 */

#define raw_cpu_read_1(pcp)
#define raw_cpu_read_2(pcp)
#define raw_cpu_read_4(pcp)
#define raw_cpu_write_1(pcp, val)
#define raw_cpu_write_2(pcp, val)
#define raw_cpu_write_4(pcp, val)

#define this_cpu_read_1(pcp)
#define this_cpu_read_2(pcp)
#define this_cpu_read_4(pcp)
#define this_cpu_write_1(pcp, val)
#define this_cpu_write_2(pcp, val)
#define this_cpu_write_4(pcp, val)

#define this_cpu_read_stable_1(pcp)
#define this_cpu_read_stable_2(pcp)
#define this_cpu_read_stable_4(pcp)

#define raw_cpu_add_1(pcp, val)
#define raw_cpu_add_2(pcp, val)
#define raw_cpu_add_4(pcp, val)
#define raw_cpu_and_1(pcp, val)
#define raw_cpu_and_2(pcp, val)
#define raw_cpu_and_4(pcp, val)
#define raw_cpu_or_1(pcp, val)
#define raw_cpu_or_2(pcp, val)
#define raw_cpu_or_4(pcp, val)
#define raw_cpu_xchg_1(pcp, val)
#define raw_cpu_xchg_2(pcp, val)
#define raw_cpu_xchg_4(pcp, val)

#define this_cpu_add_1(pcp, val)
#define this_cpu_add_2(pcp, val)
#define this_cpu_add_4(pcp, val)
#define this_cpu_and_1(pcp, val)
#define this_cpu_and_2(pcp, val)
#define this_cpu_and_4(pcp, val)
#define this_cpu_or_1(pcp, val)
#define this_cpu_or_2(pcp, val)
#define this_cpu_or_4(pcp, val)
#define this_cpu_xchg_1(pcp, nval)
#define this_cpu_xchg_2(pcp, nval)
#define this_cpu_xchg_4(pcp, nval)

#define raw_cpu_add_return_1(pcp, val)
#define raw_cpu_add_return_2(pcp, val)
#define raw_cpu_add_return_4(pcp, val)
#define raw_cpu_cmpxchg_1(pcp, oval, nval)
#define raw_cpu_cmpxchg_2(pcp, oval, nval)
#define raw_cpu_cmpxchg_4(pcp, oval, nval)
#define raw_cpu_try_cmpxchg_1(pcp, ovalp, nval)
#define raw_cpu_try_cmpxchg_2(pcp, ovalp, nval)
#define raw_cpu_try_cmpxchg_4(pcp, ovalp, nval)

#define this_cpu_add_return_1(pcp, val)
#define this_cpu_add_return_2(pcp, val)
#define this_cpu_add_return_4(pcp, val)
#define this_cpu_cmpxchg_1(pcp, oval, nval)
#define this_cpu_cmpxchg_2(pcp, oval, nval)
#define this_cpu_cmpxchg_4(pcp, oval, nval)
#define this_cpu_try_cmpxchg_1(pcp, ovalp, nval)
#define this_cpu_try_cmpxchg_2(pcp, ovalp, nval)
#define this_cpu_try_cmpxchg_4(pcp, ovalp, nval)

/*
 * Per-CPU atomic 64-bit operations are only available under 64-bit kernels.
 * 32-bit kernels must fall back to generic operations.
 */
#ifdef CONFIG_X86_64

#define raw_cpu_read_8(pcp)
#define raw_cpu_write_8(pcp, val)

#define this_cpu_read_8(pcp)
#define this_cpu_write_8(pcp, val)

#define this_cpu_read_stable_8(pcp)

#define raw_cpu_add_8(pcp, val)
#define raw_cpu_and_8(pcp, val)
#define raw_cpu_or_8(pcp, val)
#define raw_cpu_add_return_8(pcp, val)
#define raw_cpu_xchg_8(pcp, nval)
#define raw_cpu_cmpxchg_8(pcp, oval, nval)
#define raw_cpu_try_cmpxchg_8(pcp, ovalp, nval)

#define this_cpu_add_8(pcp, val)
#define this_cpu_and_8(pcp, val)
#define this_cpu_or_8(pcp, val)
#define this_cpu_add_return_8(pcp, val)
#define this_cpu_xchg_8(pcp, nval)
#define this_cpu_cmpxchg_8(pcp, oval, nval)
#define this_cpu_try_cmpxchg_8(pcp, ovalp, nval)

#define raw_cpu_read_long(pcp)

#else /* !CONFIG_X86_64: */

/* There is no generic 64-bit read stable operation for 32-bit targets. */
#define this_cpu_read_stable_8

#define raw_cpu_read_long

#endif /* CONFIG_X86_64 */

#define this_cpu_read_const(pcp)

/*
 * this_cpu_read() makes the compiler load the per-CPU variable every time
 * it is accessed, while this_cpu_read_stable() allows the value to be cached.
 * this_cpu_read_stable() is more efficient and can be used if its value
 * is guaranteed to be valid across CPUs.  The current users include
 * pcpu_hot.current_task and pcpu_hot.top_of_stack, both of which are
 * actually per-thread variables implemented as per-CPU variables and
 * thus stable for the duration of the respective task.
 */
#define this_cpu_read_stable(pcp)
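
/*
 * Usage sketch contrasting the two reads (local variables are
 * illustrative):
 *
 *	struct task_struct *a = this_cpu_read(pcpu_hot.current_task);
 *	struct task_struct *b = this_cpu_read_stable(pcpu_hot.current_task);
 *
 * 'a' is reloaded from per-CPU memory at every access, while 'b' may be
 * cached in a register for the rest of the function: for a given task the
 * value is the same regardless of which CPU the task is running on, so
 * preemption and migration in between cannot make the cached copy wrong.
 */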

#define x86_this_cpu_constant_test_bit(_nr, _var)

#define x86_this_cpu_variable_test_bit(_nr, _var)

#define x86_this_cpu_test_bit(_nr, _var)
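
/*
 * The constant/variable split above typically exists so that a
 * compile-time-constant bit number can use the cheaper form; a sketch of
 * the assumed dispatch (not the elided bodies):
 *
 *	#define x86_this_cpu_test_bit(_nr, _var)		\
 *		(__builtin_constant_p(_nr)			\
 *		 ? x86_this_cpu_constant_test_bit(_nr, _var)	\
 *		 : x86_this_cpu_variable_test_bit(_nr, _var))
 */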


#include <asm-generic/percpu.h>

/* We can use this directly for local CPU (faster). */
DECLARE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off);

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_SMP

/*
 * Define the "EARLY_PER_CPU" macros.  These are used for some per_cpu
 * variables that are initialized and accessed before there are per_cpu
 * areas allocated.
 */
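
/*
 * Usage sketch (x86_cpu_to_apicid is a real user of these macros; the
 * behaviour described is the intended one, not copied from the elided
 * definitions): the variable gets a static NR_CPUS-sized map that is
 * consulted until the per-CPU areas exist, after which the normal
 * per_cpu() storage is used:
 *
 *	DEFINE_EARLY_PER_CPU_READ_MOSTLY(u32, x86_cpu_to_apicid, BAD_APICID);
 *
 *	u32 apicid = early_per_cpu(x86_cpu_to_apicid, cpu);
 *
 * Before setup_per_cpu_areas() the read comes from
 * early_per_cpu_map(x86_cpu_to_apicid, cpu); afterwards it comes from
 * per_cpu(x86_cpu_to_apicid, cpu).
 */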

#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)

#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue)

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)

#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name)

#define early_per_cpu_ptr(_name)
#define early_per_cpu_map(_name, _idx)

#define early_per_cpu(_name, _cpu)

#else /* !CONFIG_SMP: */
#define DEFINE_EARLY_PER_CPU

#define DEFINE_EARLY_PER_CPU_READ_MOSTLY

#define EXPORT_EARLY_PER_CPU_SYMBOL

#define DECLARE_EARLY_PER_CPU

#define DECLARE_EARLY_PER_CPU_READ_MOSTLY

#define early_per_cpu
#define early_per_cpu_ptr
/* no early_per_cpu_map() */

#endif /* !CONFIG_SMP */

#endif /* _ASM_X86_PERCPU_H */