#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H
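
/*
 * User space memory access functions for 64-bit x86.
 */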
#include <linux/compiler.h>
#include <linux/lockdep.h>
#include <linux/kasan-checks.h>
#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>
#include <asm/percpu.h>
#ifdef CONFIG_ADDRESS_MASKING
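/*
 * Mask out tag bits from the address.
 */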
static inline unsigned long __untagged_addr(unsigned long addr)
{ … }
#define untagged_addr(addr) …
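
/*
 * Variant for a remote mm: uses that mm's untag mask rather than the
 * current CPU's.  Callers are expected to hold the mm's mmap lock so
 * the mask cannot change underneath them.
 */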
static inline unsigned long __untagged_addr_remote(struct mm_struct *mm,
						   unsigned long addr)
{ … }
#define untagged_addr_remote(mm, addr) …
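
/*
 * Illustrative caller pattern (a sketch, not part of this header):
 * strip tag bits before comparing a user pointer against VMA bounds:
 *
 *	addr = untagged_addr(uaddr);
 *	vma = find_vma(current->mm, addr);
 */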
#endif /* CONFIG_ADDRESS_MASKING */
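
/*
 * The virtual address space is logically divided into a kernel half
 * and a user half.  When cast to a signed type, user pointers are
 * positive and kernel pointers are negative, so a sign check is
 * enough to tell the two halves apart.
 */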
#define valid_user_address(x) …
static inline bool __access_ok(const void __user *ptr, unsigned long size)
{ … }
#define __access_ok …
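
/*
 * Illustrative caller pattern (a sketch): validate the whole range
 * before doing any raw copy:
 *
 *	if (!__access_ok(uptr, len))
 *		return -EFAULT;
 */

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */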
__must_check unsigned long
rep_movs_alternative(void *to, const void *from, unsigned len);
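
/*
 * Copy 'len' bytes with user access enabled (STAC/CLAC around the
 * copy).  On CPUs with fast short "rep movsb" (X86_FEATURE_FSRM) this
 * is expected to be a bare "rep movsb"; otherwise it falls back to
 * rep_movs_alternative().  Returns the number of bytes left uncopied.
 */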
static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned long len)
{ … }
static __always_inline __must_check unsigned long
raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
{ … }
static __always_inline __must_check unsigned long
raw_copy_to_user(void __user *dst, const void *src, unsigned long size)
{ … }
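
/*
 * Illustrative return convention (a sketch): both raw copies return
 * the number of bytes NOT copied, so zero means success:
 *
 *	if (raw_copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 */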
extern long __copy_user_nocache(void *dst, const void __user *src, unsigned size);
extern long __copy_user_flushcache(void *dst, const void __user *src, unsigned size);
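
/*
 * Wrappers around the non-temporal copies above: stores bypass the CPU
 * cache (and the flushcache variant ensures the destination reaches
 * durable media).  Useful on paths such as persistent-memory writes
 * where cache pollution is undesirable.
 */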
static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{ … }
static inline int
__copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{ … }
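
/*
 * Zero Userspace.
 *
 * rep_stos_alternative() is the out-of-line fallback used when a bare
 * "rep stosb" (X86_FEATURE_FSRS) is not the fast path; it does not do
 * the access_ok() check itself.
 */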
__must_check unsigned long
rep_stos_alternative(void __user *addr, unsigned long len);
static __always_inline __must_check unsigned long __clear_user(void __user *addr, unsigned long size)
{ … }
static __always_inline unsigned long clear_user(void __user *to, unsigned long n)
{ … }
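
/*
 * Illustrative caller pattern (a sketch): clear_user() validates the
 * range itself and returns the number of bytes NOT zeroed:
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */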
#endif /* _ASM_X86_UACCESS_64_H */