/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MWAIT_H
#define _ASM_X86_MWAIT_H

#include <linux/sched.h>
#include <linux/sched/idle.h>

#include <asm/cpufeature.h>
#include <asm/nospec-branch.h>

/*
 * Masks/shifts for packing and decoding the MWAIT "hint" (passed in EAX)
 * into its C-state and sub-state fields. Values are elided in this view;
 * NOTE(review): the names imply hint = (cstate << SUBSTATE_SIZE) | substate
 * — confirm against the full header.
 */
#define MWAIT_SUBSTATE_MASK …
#define MWAIT_CSTATE_MASK …
#define MWAIT_SUBSTATE_SIZE …
#define MWAIT_HINT2CSTATE(hint) …
#define MWAIT_HINT2SUBSTATE(hint) …
#define MWAIT_C1_SUBSTATE_MASK …

/*
 * CPUID leaf enumerating MONITOR/MWAIT capabilities, and feature bits
 * reported in ECX of that leaf (values elided in this view).
 */
#define CPUID_MWAIT_LEAF …
#define CPUID5_ECX_EXTENSIONS_SUPPORTED …
#define CPUID5_ECX_INTERRUPT_BREAK …

/*
 * ECX flag passed to the MWAIT instruction itself.
 * NOTE(review): per the ECX[0] row of the MWAIT/MWAITX table below,
 * presumably this makes masked interrupts break the wait — confirm.
 */
#define MWAIT_ECX_INTERRUPT_BREAK …

/* MWAITX (AMD) controls: ECX timer-enable bit and EBX wait-cycle values. */
#define MWAITX_ECX_TIMER_ENABLE …
#define MWAITX_MAX_WAIT_CYCLES …
#define MWAITX_DISABLE_CSTATES …

/* TPAUSE requested states: C0.1 (light sleep) vs C0.2 (deeper) — see __tpause(). */
#define TPAUSE_C01_STATE …
#define TPAUSE_C02_STATE …

/*
 * Arm the monitoring hardware for the address in @eax via the MONITOR
 * instruction (body elided in this view). @ecx/@edx are the instruction's
 * extension/hint operands.
 */
static __always_inline void __monitor(const void *eax, unsigned long ecx, unsigned long edx)
{
	…
}

/*
 * AMD MONITORX variant of __monitor() — same operands, different opcode
 * (see the MONITOR/MONITORX comparison table below). Body elided in this view.
 */
static __always_inline void __monitorx(const void *eax, unsigned long ecx, unsigned long edx)
{
	…
}

/*
 * Enter a monitored wait via MWAIT with hint @eax and extensions @ecx
 * (body elided in this view).
 */
static __always_inline void __mwait(unsigned long eax, unsigned long ecx)
{
	…
}

/*
 * MWAITX allows for a timer expiration to get the core out of a wait state in
 * addition to the default MWAIT exit condition of a store appearing at a
 * monitored virtual address.
 *
 * Registers:
 *
 * MWAITX ECX[1]: enable timer if set
 * MWAITX EBX[31:0]: max wait time expressed in SW P0 clocks. The software P0
 * frequency is the same as the TSC frequency.
 *
 * Below is a comparison between MWAIT and MWAITX on AMD processors:
 *
 *                 MWAIT                           MWAITX
 * opcode          0f 01 c9           |            0f 01 fb
 * ECX[0]                  value of RFLAGS.IF seen by instruction
 * ECX[1]          unused/#GP if set  |            enable timer if set
 * ECX[31:2]                     unused/#GP if set
 * EAX                           unused (reserve for hint)
 * EBX[31:0]       unused             |            max wait time (P0 clocks)
 *
 *                 MONITOR                         MONITORX
 * opcode          0f 01 c8           |            0f 01 fa
 * EAX                     (logical) address to monitor
 * ECX                     #GP if not zero
 */
static __always_inline void __mwaitx(unsigned long eax, unsigned long ebx, unsigned long ecx)
{
	…
}

/*
 * Re-enable interrupts right upon calling mwait in such a way that
 * no interrupt can fire _before_ the execution of mwait, ie: no
 * instruction must be placed between "sti" and "mwait".
 *
 * This is necessary because if an interrupt queues a timer before
 * executing mwait, it would otherwise go unnoticed and the next tick
 * would not be reprogrammed accordingly before mwait ever wakes up.
 */
static __always_inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
	…
}

/*
 * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate IPI to trigger checking of need_resched.
 * We execute MONITOR against need_resched and enter optimized wait state
 * through MWAIT. Whenever someone changes need_resched, we would be woken
 * up from MWAIT (without an IPI).
 *
 * New with Core Duo processors, MWAIT can take some hints based on CPU
 * capability.
 */
static __always_inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
{
	…
}

/*
 * Caller can specify whether to enter C0.1 (low latency, less
 * power saving) or C0.2 state (saves more power, but longer wakeup
 * latency). This may be overridden by the IA32_UMWAIT_CONTROL MSR
 * which can force requests for C0.2 to be downgraded to C0.1.
 */
static inline void __tpause(u32 ecx, u32 edx, u32 eax)
{
	…
}

#endif /* _ASM_X86_MWAIT_H */