linux/arch/x86/kernel/irq.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Common interrupt code for 32 and 64 bit
 */
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/of.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/ftrace.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/irq.h>

#include <asm/irq_stack.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/irq.h>
#include <asm/mce.h>
#include <asm/hw_irq.h>
#include <asm/desc.h>
#include <asm/traps.h>
#include <asm/thermal.h>
#include <asm/posted_intr.h>
#include <asm/irq_remapping.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/irq_vectors.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

atomic_t irq_err_count;

/*
 * 'What should we do if we get a hw irq event on an illegal vector?'
 * Each architecture has to answer this for itself.
 */
void ack_bad_irq(unsigned int irq)
{}
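
/*
 * Illustrative sketch only (the _sketch name is hypothetical, and this is not
 * necessarily the upstream body): the usual x86 answer is to warn about the
 * spurious vector in a rate-limited fashion and then ack the local APIC so
 * the unhandled vector cannot stall further delivery.
 */
static void __maybe_unused ack_bad_irq_sketch(unsigned int irq)
{
	/* Rate-limit the warning; a screaming vector would flood the log. */
	if (printk_ratelimit())
		pr_err("unexpected IRQ trap at vector %02x\n", irq);

	/* Ack the local APIC so the unhandled vector does not block others. */
	apic_eoi();
}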

#define irq_stats(x)		(&per_cpu(irq_stat, x))
/*
 * /proc/interrupts printing for arch specific interrupts
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{}
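
/*
 * Illustrative sketch only (hypothetical _sketch name, not the full upstream
 * implementation): one representative /proc/interrupts row, showing how the
 * per-CPU counters in irq_stat are typically printed.  'prec' is the label
 * field width chosen by the generic /proc/interrupts code.
 */
static int __maybe_unused arch_show_interrupts_sketch(struct seq_file *p, int prec)
{
	int j;

	/* One row: right-aligned label, one column per online CPU, description. */
	seq_printf(p, "%*s: ", prec, "NMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
	seq_puts(p, "  Non-maskable interrupts\n");

	return 0;
}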

/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{}

u64 arch_irq_stat(void)
{}

static __always_inline void handle_irq(struct irq_desc *desc,
				       struct pt_regs *regs)
{}

static __always_inline int call_irq_handler(int vector, struct pt_regs *regs)
{}

/*
 * common_interrupt() handles all normal device IRQs (the special SMP
 * cross-CPU interrupts have their own entry points).
 */
DEFINE_IDTENTRY_IRQ(common_interrupt)
{}
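
/*
 * Illustrative sketch only (hypothetical _sketch names, not the upstream
 * bodies): the essence of the helpers and entry point above is "translate the
 * hardware vector into an irq_desc via the per-CPU vector_irq[] table, run
 * its flow handler, and ack the APIC directly if no handler is installed".
 * The real code additionally switches to the irq stack on 32-bit and logs
 * unexpected vectors.
 */
static int __maybe_unused call_irq_handler_sketch(int vector, struct pt_regs *regs)
{
	struct irq_desc *desc;

	/* vector_irq[] maps hardware vectors to interrupt descriptors per CPU. */
	desc = __this_cpu_read(vector_irq[vector]);
	if (IS_ERR_OR_NULL(desc))
		return -EINVAL;	/* Unused or shut-down vector: caller acks it. */

	/* Run the descriptor's flow handler. */
	generic_handle_irq_desc(desc);
	return 0;
}

static void __maybe_unused common_interrupt_sketch(struct pt_regs *regs, u32 vector)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	/* If the vector had no handler, EOI here so it cannot fire forever. */
	if (unlikely(call_irq_handler_sketch(vector, regs)))
		apic_eoi();

	set_irq_regs(old_regs);
}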

#ifdef CONFIG_X86_LOCAL_APIC
/* Function pointer for generic interrupt vector handling */
void (*x86_platform_ipi_callback)(void) = NULL;
/*
 * Handler for X86_PLATFORM_IPI_VECTOR.
 */
DEFINE_IDTENTRY_SYSVEC(sysvec_x86_platform_ipi)
{}
#endif

#if IS_ENABLED(CONFIG_KVM)
static void dummy_handler(void) {}
static void (*kvm_posted_intr_wakeup_handler)(void) = dummy_handler;

void kvm_set_posted_intr_wakeup_handler(void (*handler)(void))
{}
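
/*
 * Illustrative sketch only (hypothetical _sketch name): registration boils
 * down to "install the caller's handler, or fall back to dummy_handler and
 * wait out concurrent users when unregistering".
 */
static void __maybe_unused kvm_set_posted_intr_wakeup_handler_sketch(void (*handler)(void))
{
	if (handler) {
		kvm_posted_intr_wakeup_handler = handler;
	} else {
		/* Restore the no-op default, then wait for in-flight callers. */
		kvm_posted_intr_wakeup_handler = dummy_handler;
		synchronize_rcu();
	}
}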
EXPORT_SYMBOL_GPL(kvm_set_posted_intr_wakeup_handler);

/*
 * Handler for POSTED_INTERRUPT_VECTOR.
 */
DEFINE_IDTENTRY_SYSVEC_SIMPLE(sysvec_kvm_posted_intr_ipi)
{}

/*
 * Handler for POSTED_INTERRUPT_WAKEUP_VECTOR.
 */
DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_posted_intr_wakeup_ipi)
{}
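
/*
 * Illustrative sketch only (hypothetical _sketch name, per-CPU stats
 * accounting omitted): the wakeup vector simply acks the APIC and invokes
 * whichever handler KVM registered above (dummy_handler by default).
 */
static void __maybe_unused kvm_posted_intr_wakeup_sketch(void)
{
	apic_eoi();
	kvm_posted_intr_wakeup_handler();
}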

/*
 * Handler for POSTED_INTERRUPT_NESTED_VECTOR.
 */
DEFINE_IDTENTRY_SYSVEC_SIMPLE(sysvec_kvm_posted_intr_nested_ipi)
{}
#endif

#ifdef CONFIG_X86_POSTED_MSI

/* Posted Interrupt Descriptors for coalesced MSIs to be posted */
DEFINE_PER_CPU_ALIGNED(struct pi_desc, posted_msi_pi_desc);

void intel_posted_msi_init(void)
{}
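
/*
 * Illustrative sketch only (hypothetical _sketch name; read_apic_id() is a
 * simplification of how upstream obtains the APIC ID): per-CPU setup amounts
 * to programming this CPU's posted interrupt descriptor with the notification
 * vector and the APIC destination ID, so the IOMMU knows where to post.  In
 * xAPIC mode the destination ID lives in bits 15:8 of NDST.
 */
static void __maybe_unused intel_posted_msi_init_sketch(void)
{
	u32 apicid = read_apic_id();

	this_cpu_write(posted_msi_pi_desc.nv, POSTED_MSI_NOTIFICATION_VECTOR);
	this_cpu_write(posted_msi_pi_desc.ndst,
		       x2apic_enabled() ? apicid : apicid << 8);
}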

/*
 * De-multiplexing posted interrupts is on the performance path; the code
 * below is written to optimize cache performance based on the following
 * considerations:
 * 1. The posted interrupt descriptor (PID) fits in a cache line that is
 *    frequently accessed by both the CPU and the IOMMU.
 * 2. During posted MSI processing, the CPU needs to do 64-bit reads and xchgs
 *    for checking and clearing the posted interrupt request (PIR), a 256-bit
 *    field within the PID.
 * 3. On the other side, the IOMMU does atomic swaps of the entire PID cache
 *    line when posting interrupts and setting control bits.
 * 4. The CPU can access the cache line an order of magnitude faster than the
 *    IOMMU.
 * 5. Each time the IOMMU posts an interrupt to the PIR, it evicts the PID
 *    cache line. The cache line states after each operation are as follows:
 *
 *   CPU			IOMMU			PID cache line state
 *   ---------------------------------------------------------------
 *   read64						exclusive
 *   lock xchg64					modified
 *				post/atomic swap	invalid
 *   ---------------------------------------------------------------
 *
 * To reduce L1 data cache misses, it is important to avoid contention with
 * the IOMMU's interrupt posting/atomic swaps. Therefore, a copy of the PIR is
 * used to dispatch the interrupt handlers.
 *
 * In addition, the code tries to keep the cache line state as consistent as
 * possible, e.g. when making a copy and clearing the PIR (assuming non-zero
 * PIR bits are present in the entire PIR), it does:
 *		read, read, read, read, xchg, xchg, xchg, xchg
 * instead of:
 *		read, xchg, read, xchg, read, xchg, read, xchg
 */
static __always_inline bool handle_pending_pir(u64 *pir, struct pt_regs *regs)
{}
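
/*
 * Illustrative sketch only (hypothetical _sketch name, less optimized than
 * the real code), following the ordering described above: read all four
 * 64-bit PIR words into a local copy first, then xchg only the non-zero ones
 * back to zero, and finally dispatch every vector recorded in the copy.
 */
static bool __maybe_unused handle_pending_pir_sketch(u64 *pir, struct pt_regs *regs)
{
	unsigned long pir_copy[4];
	int i, vec = FIRST_EXTERNAL_VECTOR;
	bool handled = false;

	/* Pass 1: plain reads, keeping the cache line in a non-modified state. */
	for (i = 0; i < 4; i++)
		pir_copy[i] = pir[i];

	/* Pass 2: atomically clear only the words that actually have bits set. */
	for (i = 0; i < 4; i++) {
		if (!pir_copy[i])
			continue;
		pir_copy[i] = arch_xchg(&pir[i], 0);
		handled = true;
	}

	/* Dispatch from the local copy, away from the IOMMU-contended line. */
	if (handled) {
		for_each_set_bit_from(vec, pir_copy, FIRST_SYSTEM_VECTOR)
			call_irq_handler_sketch(vec, regs);
	}

	return handled;
}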

/*
 * Performance data shows that 3 is good enough to harvest 90+% of the benefit
 * on high IRQ rate workloads.
 */
#define MAX_POSTED_MSI_COALESCING_LOOP	3

/*
 * For MSIs that are delivered as posted interrupts, the CPU notifications
 * can be coalesced if the MSIs arrive in high frequency bursts.
 */
DEFINE_IDTENTRY_SYSVEC(sysvec_posted_msi_notification)
{}
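
/*
 * Illustrative sketch only (hypothetical _sketch name, stats accounting
 * omitted, written as a plain function rather than via
 * DEFINE_IDTENTRY_SYSVEC): keep harvesting the PIR while new bits keep
 * arriving, but give up after a bounded number of passes, then re-enable
 * notifications and do one final harvest to close the race with a concurrent
 * posting.
 */
static void __maybe_unused posted_msi_notification_sketch(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	struct pi_desc *pid = this_cpu_ptr(&posted_msi_pi_desc);
	int i = 0;

	/* Coalesce: as long as a pass found work, go around again (bounded). */
	while (handle_pending_pir_sketch((u64 *)pid->pir, regs)) {
		if (++i > MAX_POSTED_MSI_COALESCING_LOOP)
			break;
	}

	/*
	 * Allow new notifications again, then harvest once more so a vector
	 * posted in the meantime is not delayed until the next IRQ.
	 */
	pi_clear_on(pid);
	handle_pending_pir_sketch((u64 *)pid->pir, regs);

	apic_eoi();
	set_irq_regs(old_regs);
}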
#endif /* X86_POSTED_MSI */

#ifdef CONFIG_HOTPLUG_CPU
/* A CPU has been removed from cpu_online_mask.  Reset IRQ affinities. */
void fixup_irqs(void)
{}
#endif

#ifdef CONFIG_X86_THERMAL_VECTOR
static void smp_thermal_vector(void)
{}

DEFINE_IDTENTRY_SYSVEC(sysvec_thermal)
{}
#endif