linux/arch/x86/include/asm/barrier.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_BARRIER_H
#define _ASM_X86_BARRIER_H

#include <asm/alternative.h>
#include <asm/nops.h>

/*
 * Force strict CPU ordering.
 * And yes, this might be required on UP too when we're talking
 * to devices.
 */

#ifdef CONFIG_X86_32
/*
 * 32-bit CPUs without SSE2 lack MFENCE/LFENCE/SFENCE; fall back to a
 * LOCK-prefixed stack access, which is also a full barrier.
 */
#define mb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "mfence", \
				      X86_FEATURE_XMM2) ::: "memory", "cc")
#define rmb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "lfence", \
				       X86_FEATURE_XMM2) ::: "memory", "cc")
#define wmb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "sfence", \
				       X86_FEATURE_XMM2) ::: "memory", "cc")
#else
#define __mb()	asm volatile("mfence":::"memory")
#define __rmb()	asm volatile("lfence":::"memory")
#define __wmb()	asm volatile("sfence" ::: "memory")
#endif
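
/*
 * Illustrative use of wmb() (an example, not part of this header; desc,
 * dma_addr and OWN_DEVICE are made-up names): publish a DMA descriptor
 * before handing it to the device by flipping an ownership flag the
 * device polls:
 *
 *	desc->addr  = dma_addr;
 *	desc->len   = len;
 *	wmb();			// descriptor fields land first
 *	desc->flags = OWN_DEVICE;
 *
 * For coherent memory alone the lighter dma_wmb() would also do; wmb()
 * is the stronger variant that also orders accesses to devices.
 */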

/**
 * array_index_mask_nospec() - generate a mask that is ~0UL when the
 * 	bounds check succeeds and 0 otherwise
 * @idx: array element index
 * @sz: number of elements in array
 *
 * Returns:
 *     0 - (idx < sz)
 */
#define array_index_mask_nospec(idx,sz) ({	\
	typeof((idx)+(sz)) __idx = (idx);	\
	typeof(__idx) __sz = (sz);		\
	unsigned long __mask;			\
	asm volatile ("cmp %1,%2; sbb %0,%0"	\
			:"=r" (__mask)		\
			:"g" (__sz),		\
			 "r" (__idx)		\
			:"cc");			\
	__mask;					\
})
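
/*
 * Illustrative use (not part of this header), roughly the pattern that
 * array_index_nospec() in <linux/nospec.h> builds from this mask; arr,
 * idx, sz and val are made-up names:
 *
 *	if (idx < sz) {
 *		idx &= array_index_mask_nospec(idx, sz);
 *		val = arr[idx];
 *	}
 *
 * The CMP/SBB pair computes the mask without a branch, so even a
 * mispredicted bounds check can at worst load arr[0], never an
 * attacker-chosen out-of-bounds slot.
 */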

/* Prevent speculative execution past this barrier. */
#define barrier_nospec() alternative("", "lfence", X86_FEATURE_LFENCE_RDTSC)
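
/*
 * Illustrative use (made-up example; idx, sz, arr and val are not real
 * symbols): keep the CPU from speculatively dereferencing past a
 * rejected bounds check:
 *
 *	if (idx >= sz)
 *		return -EINVAL;
 *	barrier_nospec();	// no speculative arr[idx] before this
 *	val = arr[idx];
 */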

/* x86 keeps coherent-DMA accesses ordered in hardware; only the compiler needs restraining. */
#define __dma_rmb()	barrier()
#define __dma_wmb()	barrier()

/* A LOCKed stack access is a full barrier and cheaper than MFENCE. */
#define __smp_mb()	asm volatile("lock; addl $0,-4(%%" _ASM_SP ")" ::: "memory", "cc")

#define __smp_rmb()	dma_rmb()
#define __smp_wmb()	barrier()
#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})
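
/*
 * Illustrative release/acquire pairing (an example, not part of this
 * header; data and ready are made-up variables):
 *
 *	// producer
 *	data = 42;
 *	smp_store_release(&ready, 1);
 *
 *	// consumer
 *	while (!smp_load_acquire(&ready))
 *		cpu_relax();
 *	// data is guaranteed to read as 42 here
 *
 * On x86 both sides compile to plain MOVs plus a compiler barrier: TSO
 * already forbids the reorderings that release/acquire must rule out.
 */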

/* Atomic operations are already serializing on x86 */
#define __smp_mb__before_atomic()	do { } while (0)
#define __smp_mb__after_atomic()	do { } while (0)
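
/*
 * Illustrative consequence (example only; v is a made-up atomic_t): a
 * LOCK-prefixed read-modify-write is already a full barrier on x86, so
 *
 *	smp_mb__before_atomic();
 *	atomic_inc(&v);
 *	smp_mb__after_atomic();
 *
 * emits just the LOCK INCL; both helpers compile to nothing.
 */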

/* Writing to CR3 provides a full memory barrier in switch_mm(). */
#define smp_mb__after_switch_mm()	barrier()

#include <asm-generic/barrier.h>

#endif /* _ASM_X86_BARRIER_H */