linux/arch/x86/include/asm/hardirq.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_HARDIRQ_H
#define _ASM_X86_HARDIRQ_H
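
/*
 * Per-CPU hardirq statistics and helpers that the generic IRQ and softirq
 * code pulls in via <linux/hardirq.h>.
 */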

#include <linux/threads.h>
#include <asm/current.h>

typedef struct {
#if IS_ENABLED(CONFIG_KVM_INTEL)
	u8	     kvm_cpu_l1tf_flush_l1d;
#endif
	unsigned int __nmi_count;	/* arch dependent */
	/* further arch-dependent per-CPU interrupt counters live here */
} ____cacheline_aligned irq_cpustat_t;

/* One irq_cpustat_t per possible CPU, cacheline aligned to avoid false sharing. */
DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);

#ifdef CONFIG_X86_POSTED_MSI
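/* Per-CPU posted-interrupt descriptor that device MSIs are posted to. */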
DECLARE_PER_CPU_ALIGNED(struct pi_desc, posted_msi_pi_desc);
#endif
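/* x86 supplies its own irq_stat; skip the generic per-CPU definition. */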
#define __ARCH_IRQ_STAT

/* Bump one of this CPU's irq_stat counters. */
#define inc_irq_stat(member)	this_cpu_inc(irq_stat.member)

/* Called by the generic IRQ code when an unexpected vector fires. */
extern void ack_bad_irq(unsigned int irq);

/*
 * Arch-specific interrupt counts for /proc/stat; the self-referencing
 * defines let the generic code detect that x86 implements them.
 */
extern u64 arch_irq_stat_cpu(unsigned int cpu);
#define arch_irq_stat_cpu	arch_irq_stat_cpu

extern u64 arch_irq_stat(void);
#define arch_irq_stat		arch_irq_stat

/* The generic softirq_pending accessors operate on this per-CPU field. */
#define local_softirq_pending_ref	pcpu_hot.softirq_pending

#if IS_ENABLED(CONFIG_KVM_INTEL)
/*
 * This function is called from noinstr interrupt contexts
 * and must be inlined to not get instrumentation.
 */
static __always_inline void kvm_set_cpu_l1tf_flush_l1d(void)
{
	__this_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 1);
}

static __always_inline void kvm_clear_cpu_l1tf_flush_l1d(void)
{
	__this_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 0);
}

static __always_inline bool kvm_get_cpu_l1tf_flush_l1d(void)
{
	return __this_cpu_read(irq_stat.kvm_cpu_l1tf_flush_l1d);
}
#else /* !IS_ENABLED(CONFIG_KVM_INTEL) */
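/* The interrupt entry code calls the setter unconditionally, so keep a stub. */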
static __always_inline void kvm_set_cpu_l1tf_flush_l1d(void) { }
#endif /* IS_ENABLED(CONFIG_KVM_INTEL) */

#endif /* _ASM_X86_HARDIRQ_H */