Merge branch 'x86-uaccess-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 uaccess changes from Ingo Molnar:
 "A single change that micro-optimizes __copy_*_user_inatomic(), used
  by the futex code"

* 'x86-uaccess-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86: Add 1/2/4/8 byte optimization to 64bit __copy_{from,to}_user_inatomic
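The pattern behind the micro-optimization: an inner `_nocheck` helper tests `__builtin_constant_p(size)` and, for constant 1/2/4/8-byte sizes, lets the compiler collapse the copy into a single fixed-width move instead of a call to the out-of-line `copy_user_generic()`. What follows is a minimal userspace sketch of that pattern, not the kernel code itself; the names `copy_generic` and `copy_nocheck` are made up for illustration, and GCC/Clang builtins are assumed:

#include <stdio.h>
#include <string.h>

/* Stand-in for the out-of-line copy_user_generic(): handles sizes
 * that are not known at compile time. */
static unsigned long copy_generic(void *dst, const void *src, unsigned size)
{
	memcpy(dst, src, size);
	return 0;	/* 0 == nothing left uncopied */
}

/* Inner helper, analogous to __copy_from_user_nocheck(): when `size`
 * is a compile-time constant, the switch folds to one fixed-width
 * access; otherwise it falls back to the generic routine. */
static inline __attribute__((always_inline)) unsigned long
copy_nocheck(void *dst, const void *src, unsigned size)
{
	if (!__builtin_constant_p(size))
		return copy_generic(dst, src, size);
	switch (size) {
	case 1: memcpy(dst, src, 1); return 0;	/* folds to one 8-bit move */
	case 2: memcpy(dst, src, 2); return 0;	/* one 16-bit move */
	case 4: memcpy(dst, src, 4); return 0;	/* one 32-bit move */
	case 8: memcpy(dst, src, 8); return 0;	/* one 64-bit move */
	default: return copy_generic(dst, src, size);
	}
}

int main(void)
{
	unsigned int src = 42, dst = 0;
	/* sizeof(src) is a constant 4, so this takes the 32-bit fast path. */
	copy_nocheck(&dst, &src, sizeof(src));
	printf("%u\n", dst);
	return 0;
}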
@@ -49,11 +49,10 @@ __must_check unsigned long
 copy_in_user(void __user *to, const void __user *from, unsigned len);
 
 static __always_inline __must_check
-int __copy_from_user(void *dst, const void __user *src, unsigned size)
+int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
 {
 	int ret = 0;
 
-	might_fault();
 	if (!__builtin_constant_p(size))
 		return copy_user_generic(dst, (__force void *)src, size);
 	switch (size) {
@@ -93,11 +92,17 @@ int __copy_from_user(void *dst, const void __user *src, unsigned size)
 }
 
 static __always_inline __must_check
-int __copy_to_user(void __user *dst, const void *src, unsigned size)
+int __copy_from_user(void *dst, const void __user *src, unsigned size)
+{
+	might_fault();
+	return __copy_from_user_nocheck(dst, src, size);
+}
+
+static __always_inline __must_check
+int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
 {
 	int ret = 0;
 
-	might_fault();
 	if (!__builtin_constant_p(size))
 		return copy_user_generic((__force void *)dst, src, size);
 	switch (size) {
@@ -136,6 +141,13 @@ int __copy_to_user(void __user *dst, const void *src, unsigned size)
 	}
 }
 
+static __always_inline __must_check
+int __copy_to_user(void __user *dst, const void *src, unsigned size)
+{
+	might_fault();
+	return __copy_to_user_nocheck(dst, src, size);
+}
+
 static __always_inline __must_check
 int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
 {
@@ -192,13 +204,13 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
 static __must_check __always_inline int
 __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
 {
-	return copy_user_generic(dst, (__force const void *)src, size);
+	return __copy_from_user_nocheck(dst, (__force const void *)src, size);
 }
 
 static __must_check __always_inline int
 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
 {
-	return copy_user_generic((__force void *)dst, src, size);
+	return __copy_to_user_nocheck((__force void *)dst, src, size);
 }
 
 extern long __copy_user_nocache(void *dst, const void __user *src,
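The net effect of the last two hunks: the inatomic variants now route through the _nocheck helpers and so reach the constant-size fast path. A call with a compile-time-constant length, which is the shape of the futex code's 4-byte accesses, can inline to a single user access instead of a call into copy_user_generic(). A hypothetical caller shape (not the actual futex code) running under pagefault_disable():

	u32 curval;
	if (__copy_from_user_inatomic(&curval, uaddr, sizeof(u32)))
		return -EFAULT;

Moving might_fault() out of the inner helpers and into the new checked wrappers __copy_from_user()/__copy_to_user() keeps the annotation where it belongs: the inatomic entry points run in contexts where faulting is not allowed, so they must not carry it.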