Merge branch 'x86/core' into core/percpu
@@ -1823,7 +1823,7 @@ static size_t __iovec_copy_from_user_inatomic(char *vaddr,
 		int copy = min(bytes, iov->iov_len - base);
 
 		base = 0;
-		left = __copy_from_user_inatomic_nocache(vaddr, buf, copy);
+		left = __copy_from_user_inatomic(vaddr, buf, copy);
 		copied += copy;
 		bytes -= copy;
 		vaddr += copy;
@@ -1851,8 +1851,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
 	if (likely(i->nr_segs == 1)) {
 		int left;
 		char __user *buf = i->iov->iov_base + i->iov_offset;
-		left = __copy_from_user_inatomic_nocache(kaddr + offset,
-							buf, bytes);
+		left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
 		copied = bytes - left;
 	} else {
 		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
@@ -1880,7 +1879,7 @@ size_t iov_iter_copy_from_user(struct page *page,
 	if (likely(i->nr_segs == 1)) {
 		int left;
 		char __user *buf = i->iov->iov_base + i->iov_offset;
-		left = __copy_from_user_nocache(kaddr + offset, buf, bytes);
+		left = __copy_from_user(kaddr + offset, buf, bytes);
 		copied = bytes - left;
 	} else {
 		copied = __iovec_copy_from_user_inatomic(kaddr + offset,

mm/shmem.c (43 lines changed)
@@ -169,13 +169,13 @@ static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
  */
 static inline int shmem_acct_size(unsigned long flags, loff_t size)
 {
-	return (flags & VM_ACCOUNT) ?
-		security_vm_enough_memory_kern(VM_ACCT(size)) : 0;
+	return (flags & VM_NORESERVE) ?
+		0 : security_vm_enough_memory_kern(VM_ACCT(size));
 }
 
 static inline void shmem_unacct_size(unsigned long flags, loff_t size)
 {
-	if (flags & VM_ACCOUNT)
+	if (!(flags & VM_NORESERVE))
 		vm_unacct_memory(VM_ACCT(size));
 }
 
@@ -187,13 +187,13 @@ static inline void shmem_unacct_size(unsigned long flags, loff_t size)
  */
 static inline int shmem_acct_block(unsigned long flags)
 {
-	return (flags & VM_ACCOUNT) ?
-		0 : security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE));
+	return (flags & VM_NORESERVE) ?
+		security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE)) : 0;
 }
 
 static inline void shmem_unacct_blocks(unsigned long flags, long pages)
 {
-	if (!(flags & VM_ACCOUNT))
+	if (flags & VM_NORESERVE)
 		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
 }
 
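A minimal standalone sketch (not kernel code) of the predicate flip in the two hunks above: the old checks charged the whole object up front only when VM_ACCOUNT was set, while the new checks charge by default and VM_NORESERVE opts out; per-block accounting inverts the same way. The flag values and the charge() helper below are invented for this demo.

#include <stdio.h>

#define VM_ACCOUNT   0x1	/* hypothetical values, illustration only */
#define VM_NORESERVE 0x2

/* Stand-in for security_vm_enough_memory_kern(): report that we charged. */
static int charge(const char *what)
{
	printf("charging %s up front\n", what);
	return 0;
}

/* Old predicate: charge the whole object only when VM_ACCOUNT is set. */
static int old_acct_size(unsigned long flags)
{
	return (flags & VM_ACCOUNT) ? charge("whole object") : 0;
}

/* New predicate: charge the whole object unless VM_NORESERVE is set. */
static int new_acct_size(unsigned long flags)
{
	return (flags & VM_NORESERVE) ? 0 : charge("whole object");
}

int main(void)
{
	printf("old, no flags:\n");	old_acct_size(0);		/* nothing charged */
	printf("new, no flags:\n");	new_acct_size(0);		/* charged by default */
	printf("new, VM_NORESERVE:\n");	new_acct_size(VM_NORESERVE);	/* explicit opt-out */
	return 0;
}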
@@ -1515,8 +1515,8 @@ static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
 	return 0;
 }
 
-static struct inode *
-shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
+static struct inode *shmem_get_inode(struct super_block *sb, int mode,
+				     dev_t dev, unsigned long flags)
 {
 	struct inode *inode;
 	struct shmem_inode_info *info;
@@ -1537,6 +1537,7 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
 		info = SHMEM_I(inode);
 		memset(info, 0, (char *)inode - (char *)info);
 		spin_lock_init(&info->lock);
+		info->flags = flags & VM_NORESERVE;
 		INIT_LIST_HEAD(&info->swaplist);
 
 		switch (mode & S_IFMT) {
@@ -1779,9 +1780,10 @@ static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
 static int
 shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
 {
-	struct inode *inode = shmem_get_inode(dir->i_sb, mode, dev);
+	struct inode *inode;
 	int error = -ENOSPC;
 
+	inode = shmem_get_inode(dir->i_sb, mode, dev, VM_NORESERVE);
 	if (inode) {
 		error = security_inode_init_security(inode, dir, NULL, NULL,
 						     NULL);
@@ -1920,7 +1922,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
 	if (len > PAGE_CACHE_SIZE)
 		return -ENAMETOOLONG;
 
-	inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0);
+	inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
 	if (!inode)
 		return -ENOSPC;
 
@@ -2332,7 +2334,7 @@ static int shmem_fill_super(struct super_block *sb,
 	sb->s_flags |= MS_POSIXACL;
 #endif
 
-	inode = shmem_get_inode(sb, S_IFDIR | sbinfo->mode, 0);
+	inode = shmem_get_inode(sb, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
 	if (!inode)
 		goto failed;
 	inode->i_uid = sbinfo->uid;
@@ -2574,12 +2576,12 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
 	return 0;
 }
 
-#define shmem_file_operations ramfs_file_operations
-#define shmem_vm_ops generic_file_vm_ops
-#define shmem_get_inode ramfs_get_inode
-#define shmem_acct_size(a, b) 0
-#define shmem_unacct_size(a, b) do {} while (0)
-#define SHMEM_MAX_BYTES LLONG_MAX
+#define shmem_vm_ops				generic_file_vm_ops
+#define shmem_file_operations			ramfs_file_operations
+#define shmem_get_inode(sb, mode, dev, flags)	ramfs_get_inode(sb, mode, dev)
+#define shmem_acct_size(flags, size)		0
+#define shmem_unacct_size(flags, size)		do {} while (0)
+#define SHMEM_MAX_BYTES				LLONG_MAX
 
 #endif /* CONFIG_SHMEM */
 
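In the !CONFIG_SHMEM fallback above, shmem_get_inode becomes a function-like macro so callers can pass the new flags argument unconditionally while ramfs_get_inode() still receives only its original three. A tiny standalone sketch of that adaptation trick; the names and values below are invented for the demo.

#include <stdio.h>

/* The real implementation only understands two arguments... */
static int backend_create(int mode, int dev)
{
	return mode + dev;
}

/* ...so a function-like macro adapts call sites by discarding the extra one. */
#define frontend_create(mode, dev, flags) backend_create(mode, dev)

int main(void)
{
	printf("%d\n", frontend_create(1, 2, 0x100));	/* flags silently ignored */
	return 0;
}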
@@ -2589,7 +2591,7 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
  * shmem_file_setup - get an unlinked file living in tmpfs
  * @name: name for dentry (to be seen in /proc/<pid>/maps
  * @size: size to be set for the file
- * @flags: vm_flags
+ * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
  */
 struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
 {
@@ -2623,13 +2625,10 @@ struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
 		goto put_dentry;
 
 	error = -ENOSPC;
-	inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0);
+	inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0, flags);
 	if (!inode)
 		goto close_file;
 
-#ifdef CONFIG_SHMEM
-	SHMEM_I(inode)->flags = (flags & VM_NORESERVE) ? 0 : VM_ACCOUNT;
-#endif
 	d_instantiate(dentry, inode);
 	inode->i_size = size;
 	inode->i_nlink = 0;	/* It is unlinked */

mm/vmalloc.c (13 lines changed)
@@ -333,6 +333,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 	unsigned long addr;
 	int purged = 0;
 
+	BUG_ON(!size);
 	BUG_ON(size & ~PAGE_MASK);
 
 	va = kmalloc_node(sizeof(struct vmap_area),
@@ -344,6 +345,9 @@ retry:
 	addr = ALIGN(vstart, align);
 
 	spin_lock(&vmap_area_lock);
+	if (addr + size - 1 < addr)
+		goto overflow;
+
 	/* XXX: could have a last_hole cache */
 	n = vmap_area_root.rb_node;
 	if (n) {
@@ -375,6 +379,8 @@ retry:
 
 		while (addr + size > first->va_start && addr + size <= vend) {
 			addr = ALIGN(first->va_end + PAGE_SIZE, align);
+			if (addr + size - 1 < addr)
+				goto overflow;
 
 			n = rb_next(&first->rb_node);
 			if (n)
|
||||
}
|
||||
found:
|
||||
if (addr + size > vend) {
|
||||
overflow:
|
||||
spin_unlock(&vmap_area_lock);
|
||||
if (!purged) {
|
||||
purge_vmap_area_lazy();
|
||||
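The two `addr + size - 1 < addr` tests added above are unsigned-wraparound checks: if the candidate range would run past the top of the address space, the addition wraps and the end of the range compares below its start, so the search bails out to the new overflow: label instead of continuing with a bogus end address. A small standalone sketch of the same idiom; the values used below are made up.

#include <stdio.h>

/* Returns 1 if [addr, addr + size) wraps past the end of the address space. */
static int range_wraps(unsigned long addr, unsigned long size)
{
	/* addr + size - 1 can only be numerically below addr if the
	 * unsigned addition wrapped around. */
	return addr + size - 1 < addr;
}

int main(void)
{
	unsigned long top = ~0UL;	/* highest representable address */

	printf("%d\n", range_wraps(0x1000, 0x2000));		/* 0: range fits */
	printf("%d\n", range_wraps(top - 0xfff, 0x2000));	/* 1: range wraps */
	return 0;
}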
@@ -508,6 +515,7 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 	static DEFINE_SPINLOCK(purge_lock);
 	LIST_HEAD(valist);
 	struct vmap_area *va;
+	struct vmap_area *n_va;
 	int nr = 0;
 
 	/*
@@ -547,7 +555,7 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 
 	if (nr) {
 		spin_lock(&vmap_area_lock);
-		list_for_each_entry(va, &valist, purge_list)
+		list_for_each_entry_safe(va, n_va, &valist, purge_list)
 			__free_vmap_area(va);
 		spin_unlock(&vmap_area_lock);
 	}
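The switch to list_for_each_entry_safe() above matters because the loop body removes and frees the entry it is visiting, so a plain iterator would read the next pointer out of freed memory; the _safe variant keeps a second cursor (n_va) pointing one entry ahead. A tiny userspace sketch of the same pattern with a hand-rolled singly linked list; the struct and names are invented for the demo.

#include <stdio.h>
#include <stdlib.h>

struct node {
	int val;
	struct node *next;
};

int main(void)
{
	/* Build a small list: 3 -> 2 -> 1 */
	struct node *head = NULL;
	for (int i = 1; i <= 3; i++) {
		struct node *n = malloc(sizeof(*n));
		n->val = i;
		n->next = head;
		head = n;
	}

	/*
	 * Freeing while iterating: save the next pointer *before* freeing
	 * the current node, which is exactly what the _safe list iterator
	 * does with its extra cursor.
	 */
	struct node *next;
	for (struct node *cur = head; cur; cur = next) {
		next = cur->next;	/* saved before cur is freed */
		printf("freeing %d\n", cur->val);
		free(cur);
	}
	return 0;
}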
@@ -1347,6 +1355,7 @@ EXPORT_SYMBOL(vfree);
 void vunmap(const void *addr)
 {
 	BUG_ON(in_interrupt());
+	might_sleep();
 	__vunmap(addr, 0);
 }
 EXPORT_SYMBOL(vunmap);
@@ -1366,6 +1375,8 @@ void *vmap(struct page **pages, unsigned int count,
 {
 	struct vm_struct *area;
 
+	might_sleep();
+
 	if (count > num_physpages)
 		return NULL;
 