/* linux/kernel/irq_work.c */

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <asm/processor.h>
#include <linux/kasan.h>

#include <trace/events/ipi.h>

static DEFINE_PER_CPU(struct llist_head, raised_list);
static DEFINE_PER_CPU(struct llist_head, lazy_list);
static DEFINE_PER_CPU(struct task_struct *, irq_workd);

static void wake_irq_workd(void)
{}

#ifdef CONFIG_SMP
/*
 * Hard irq_work callback used to kick the irq_workd thread on a remote
 * CPU when a lazy item was queued there from another CPU.
 */
static void irq_work_wake(struct irq_work *entry)
{
	wake_irq_workd();
}

static DEFINE_PER_CPU(struct irq_work, irq_work_wakeup) =
	IRQ_WORK_INIT_HARD(irq_work_wake);
#endif

static int irq_workd_should_run(unsigned int cpu)
{}

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{}

/*
 * Weak default for architectures without a self-IPI mechanism.
 */
void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}

static __always_inline void irq_work_raise(struct irq_work *work)
{}

/* Enqueue on current CPU, work must already be claimed and preempt disabled */
static void __irq_work_queue_local(struct irq_work *work)
{}

/* Enqueue the irq work @work on the current CPU */
bool irq_work_queue(struct irq_work *work)
{}
EXPORT_SYMBOL_GPL();

/*
 * Enqueue the irq_work @work on @cpu unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue_on(struct irq_work *work, int cpu)
{
#ifndef CONFIG_SMP
	return irq_work_queue(work);

#else /* CONFIG_SMP: */
	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(cpu));

	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	kasan_record_aux_stack_noalloc(work);

	preempt_disable();
	if (cpu != smp_processor_id()) {
		/* Arch remote IPI send/receive backend aren't NMI safe */
		WARN_ON_ONCE(in_nmi());

		/*
		 * On PREEMPT_RT the items which are not marked as
		 * IRQ_WORK_HARD_IRQ are added to the lazy list and a HARD work
		 * item is used on the remote CPU to wake the thread.
		 */
		if (IS_ENABLED(CONFIG_PREEMPT_RT) &&
		    !(atomic_read(&work->node.a_flags) & IRQ_WORK_HARD_IRQ)) {

			if (!llist_add(&work->node.llist, &per_cpu(lazy_list, cpu)))
				goto out;

			work = &per_cpu(irq_work_wakeup, cpu);
			if (!irq_work_claim(work))
				goto out;
		}

		__smp_call_single_queue(cpu, &work->node.llist);
	} else {
		__irq_work_queue_local(work);
	}
out:
	preempt_enable();

	return true;
#endif /* CONFIG_SMP */
}

bool irq_work_needs_cpu(void)
{}

void irq_work_single(void *arg)
{}

static void irq_work_run_list(struct llist_head *list)
{}

/*
 * hotplug calls this through:
 *  hotplug_cfd() -> flush_smp_call_function_queue()
 */
void irq_work_run(void)
{}
EXPORT_SYMBOL_GPL();

void irq_work_tick(void)
{}

/*
 * Synchronize against the irq_work @entry, ensures the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{}
EXPORT_SYMBOL_GPL();

static void run_irq_workd(unsigned int cpu)
{}

static void irq_workd_setup(unsigned int cpu)
{}

static struct smp_hotplug_thread irqwork_threads =;

static __init int irq_work_init_threads(void)
{}
early_initcall(irq_work_init_threads);