/* linux/kernel/entry/common.c */

// SPDX-License-Identifier: GPL-2.0

#include <linux/context_tracking.h>
#include <linux/entry-common.h>
#include <linux/resume_user_mode.h>
#include <linux/highmem.h>
#include <linux/jump_label.h>
#include <linux/kmsan.h>
#include <linux/livepatch.h>
#include <linux/audit.h>
#include <linux/tick.h>

#include "common.h"

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

/*
 * NOTE(review): body is empty in this copy of the file -- it looks stripped.
 * Presumably this feeds the syscall number and register state to the audit
 * subsystem on entry; confirm against upstream kernel/entry/common.c.
 */
static inline void syscall_enter_audit(struct pt_regs *regs, long syscall)
{}

/*
 * NOTE(review): non-void function with an empty body -- falling off the end
 * and having the caller use the result is undefined behavior (C11 6.9.1p12).
 * The body appears stripped; restore from upstream before building.
 */
long syscall_trace_enter(struct pt_regs *regs, long syscall,
				unsigned long work)
{}

/*
 * NOTE(review): empty body in this copy -- appears stripped. Marked noinstr,
 * so once restored it must not call instrumentable code. Verify against
 * upstream kernel/entry/common.c.
 */
noinstr void syscall_enter_from_user_mode_prepare(struct pt_regs *regs)
{}

/* Workaround to allow gradual conversion of architecture code */
/*
 * Weak default: deliberately a no-op. Architectures that have been converted
 * provide a strong definition that overrides this one at link time.
 */
void __weak arch_do_signal_or_restart(struct pt_regs *regs) {}

/**
 * exit_to_user_mode_loop - do any pending work before leaving to user space
 * @regs:	Pointer to pt_regs on entry stack
 * @ti_work:	TIF work flags as read by the caller
 *
 * NOTE(review): non-void function with an empty body -- using the return
 * value is undefined behavior. Body looks stripped; upstream presumably
 * loops until the pending TIF work bits are handled and returns the final
 * work mask. Restore before building.
 */
__always_inline unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
						     unsigned long ti_work)
{}

/*
 * If SYSCALL_EMU is set, then the only reason to report is when
 * SINGLESTEP is set (i.e. PTRACE_SYSEMU_SINGLESTEP).  This syscall
 * instruction has been already reported in syscall_enter_from_user_mode().
 */
/*
 * NOTE(review): bool-returning function with an empty body -- using the
 * result is undefined behavior. Body appears stripped; the comment above
 * describes the intended predicate. Restore from upstream.
 */
static inline bool report_single_step(unsigned long work)
{}

/*
 * NOTE(review): empty body in this copy -- appears stripped. Presumably
 * runs tracing/audit exit work selected by @work; confirm against upstream.
 */
static void syscall_exit_work(struct pt_regs *regs, unsigned long work)
{}

/*
 * Syscall specific exit to user mode preparation. Runs with interrupts
 * enabled.
 */
/* NOTE(review): body is empty in this copy -- restore from upstream. */
static void syscall_exit_to_user_mode_prepare(struct pt_regs *regs)
{}

/*
 * NOTE(review): empty body -- appears stripped. Presumably the shared
 * worker behind the two syscall_exit_to_user_mode* entry points below;
 * confirm against upstream kernel/entry/common.c.
 */
static __always_inline void __syscall_exit_to_user_mode_work(struct pt_regs *regs)
{}

/* NOTE(review): body is empty in this copy -- restore from upstream. */
void syscall_exit_to_user_mode_work(struct pt_regs *regs)
{}

/*
 * NOTE(review): empty body in this copy -- appears stripped. Marked
 * noinstr, so the restored body must keep instrumentable work inside an
 * instrumentation_begin()/end() section. Verify against upstream.
 */
__visible noinstr void syscall_exit_to_user_mode(struct pt_regs *regs)
{}

/* NOTE(review): body is empty in this copy -- restore from upstream. */
noinstr void irqentry_enter_from_user_mode(struct pt_regs *regs)
{}

/* NOTE(review): body is empty in this copy -- restore from upstream. */
noinstr void irqentry_exit_to_user_mode(struct pt_regs *regs)
{}

/*
 * NOTE(review): returns irqentry_state_t but the body is empty -- using
 * the result is undefined behavior. Body appears stripped; restore from
 * upstream before building.
 */
noinstr irqentry_state_t irqentry_enter(struct pt_regs *regs)
{}

/*
 * NOTE(review): body is empty in this copy -- appears stripped. Upstream
 * this is the out-of-line preemption point invoked on interrupt exit.
 */
void raw_irqentry_exit_cond_resched(void)
{}
#ifdef CONFIG_PREEMPT_DYNAMIC
#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
/*
 * FIX: DEFINE_STATIC_CALL() was invoked with no arguments, which cannot
 * expand to a valid definition. Upstream defines a static call named
 * irqentry_exit_cond_resched whose default target is
 * raw_irqentry_exit_cond_resched(), so dynamic-preempt can repatch the
 * call site at runtime.
 */
DEFINE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched);
#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
/* Static-key variant: the preemption point is enabled by default. */
DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
void dynamic_irqentry_exit_cond_resched(void)
{
	/* Key disabled => dynamic preemption is off; skip the resched check. */
	if (!static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
		return;
	raw_irqentry_exit_cond_resched();
}
#endif
#endif

/*
 * NOTE(review): empty body in this copy -- appears stripped. Counterpart
 * of irqentry_enter(); @state is presumably the value that function
 * returned. Restore from upstream.
 */
noinstr void irqentry_exit(struct pt_regs *regs, irqentry_state_t state)
{}

/*
 * NOTE(review): returns irqentry_state_t but the body is empty -- using
 * the result is undefined behavior. Restore from upstream before building.
 */
irqentry_state_t noinstr irqentry_nmi_enter(struct pt_regs *regs)
{}

/*
 * NOTE(review): empty body in this copy -- appears stripped. Counterpart
 * of irqentry_nmi_enter(); @irq_state is presumably the value it returned.
 * Restore from upstream.
 */
void noinstr irqentry_nmi_exit(struct pt_regs *regs, irqentry_state_t irq_state)
{}