mm: use long type for page counts in mm_populate() and get_user_pages()
Use long type for page counts in mm_populate() so as to avoid integer
overflow when running the following test code:

int main(void) {
	void *p = mmap(NULL, 0x100000000000, PROT_READ,
		       MAP_PRIVATE | MAP_ANON, -1, 0);
	printf("p: %p\n", p);
	mlockall(MCL_CURRENT);
	printf("done\n");
	return 0;
}

Signed-off-by: Michel Lespinasse <walken@google.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 28a35716d3
parent e0fb581529
committed by Linus Torvalds
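The overflow is easy to see in isolation: 0x100000000000 bytes is 2^44
bytes (16 TiB), so at 4 KiB pages the mapping covers 2^44 / 2^12 = 2^32
pages, one past what a 32-bit int can represent. A minimal standalone
sketch of the truncation (not from the commit; it assumes 4096-byte
pages and a 32-bit int, as on typical LP64 systems):

#include <stdio.h>

int main(void)
{
	unsigned long len = 0x100000000000UL;	/* 16 TiB, as in the test case */
	unsigned long nr_pages = len / 4096;	/* 2^32 pages */
	int truncated = (int)nr_pages;		/* 2^32 does not fit in int */

	/* Typically prints: nr_pages = 4294967296, as int = 0 */
	printf("nr_pages = %lu, as int = %d\n", nr_pages, truncated);
	return 0;
}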
include/linux/hugetlb.h
@@ -43,9 +43,9 @@ int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
 #endif
 
 int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
-int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
-			struct page **, struct vm_area_struct **,
-			unsigned long *, int *, int, unsigned int flags);
+long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
+			 struct page **, struct vm_area_struct **,
+			 unsigned long *, unsigned long *, long, unsigned int);
 void unmap_hugepage_range(struct vm_area_struct *,
 			  unsigned long, unsigned long, struct page *);
 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
include/linux/mm.h
@@ -1013,13 +1013,14 @@ extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *
 extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
 		void *buf, int len, int write);
 
-int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-		     unsigned long start, int len, unsigned int foll_flags,
-		     struct page **pages, struct vm_area_struct **vmas,
-		     int *nonblocking);
-int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-		   unsigned long start, int nr_pages, int write, int force,
-		   struct page **pages, struct vm_area_struct **vmas);
+long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+		      unsigned long start, unsigned long nr_pages,
+		      unsigned int foll_flags, struct page **pages,
+		      struct vm_area_struct **vmas, int *nonblocking);
+long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+		    unsigned long start, unsigned long nr_pages,
+		    int write, int force, struct page **pages,
+		    struct vm_area_struct **vmas);
 int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 			struct page **pages);
 struct kvec;
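What the widened prototypes mean for callers: the page count and the
result should stay in unsigned long/long end to end, so nothing narrows
back to int along the way. A hedged sketch, not code from this commit —
pin_user_range() is a made-up helper, and the 2013-era calling
convention (tsk/mm arguments, mmap_sem held for read) from the hunk
above is assumed:

/* Hypothetical caller sketch: the count stays unsigned long and the
 * result stays long from end to end, so values above INT_MAX survive. */
static long pin_user_range(struct task_struct *tsk, struct mm_struct *mm,
			   unsigned long start, unsigned long nr_pages,
			   struct page **pages)
{
	long ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(tsk, mm, start, nr_pages,
			     0 /* write */, 0 /* force */,
			     pages, NULL /* vmas */);
	up_read(&mm->mmap_sem);

	return ret;	/* pages pinned, or -errno on the first failure */
}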
mm/hugetlb.c
@@ -2920,14 +2920,14 @@ follow_huge_pud(struct mm_struct *mm, unsigned long address,
 	return NULL;
 }
 
-int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
-			struct page **pages, struct vm_area_struct **vmas,
-			unsigned long *position, int *length, int i,
-			unsigned int flags)
+long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
+			 struct page **pages, struct vm_area_struct **vmas,
+			 unsigned long *position, unsigned long *nr_pages,
+			 long i, unsigned int flags)
 {
 	unsigned long pfn_offset;
 	unsigned long vaddr = *position;
-	int remainder = *length;
+	unsigned long remainder = *nr_pages;
 	struct hstate *h = hstate_vma(vma);
 
 	spin_lock(&mm->page_table_lock);
@@ -2997,7 +2997,7 @@ same_page:
 		}
 	}
 	spin_unlock(&mm->page_table_lock);
-	*length = remainder;
+	*nr_pages = remainder;
 	*position = vaddr;
 
 	return i ? i : -EFAULT;
mm/memory.c
@@ -1677,15 +1677,15 @@ static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long add
  * instead of __get_user_pages. __get_user_pages should be used only if
  * you need some special @gup_flags.
  */
-int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-		unsigned long start, int nr_pages, unsigned int gup_flags,
-		struct page **pages, struct vm_area_struct **vmas,
-		int *nonblocking)
+long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+		unsigned long start, unsigned long nr_pages,
+		unsigned int gup_flags, struct page **pages,
+		struct vm_area_struct **vmas, int *nonblocking)
 {
-	int i;
+	long i;
 	unsigned long vm_flags;
 
-	if (nr_pages <= 0)
+	if (!nr_pages)
 		return 0;
 
 	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
@@ -1981,9 +1981,9 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
  *
  * See also get_user_pages_fast, for performance critical applications.
  */
-int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-		unsigned long start, int nr_pages, int write, int force,
-		struct page **pages, struct vm_area_struct **vmas)
+long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+		unsigned long start, unsigned long nr_pages, int write,
+		int force, struct page **pages, struct vm_area_struct **vmas)
 {
 	int flags = FOLL_TOUCH;
 
mm/mlock.c
@@ -160,7 +160,7 @@ long __mlock_vma_pages_range(struct vm_area_struct *vma,
 {
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long addr = start;
-	int nr_pages = (end - start) / PAGE_SIZE;
+	unsigned long nr_pages = (end - start) / PAGE_SIZE;
 	int gup_flags;
 
 	VM_BUG_ON(start & ~PAGE_MASK);
@@ -382,7 +382,7 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
 	unsigned long end, nstart, nend;
 	struct vm_area_struct *vma = NULL;
 	int locked = 0;
-	int ret = 0;
+	long ret = 0;
 
 	VM_BUG_ON(start & ~PAGE_MASK);
 	VM_BUG_ON(len != PAGE_ALIGN(len));
mm/nommu.c
@@ -140,10 +140,10 @@ unsigned int kobjsize(const void *objp)
 	return PAGE_SIZE << compound_order(page);
 }
 
-int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-		     unsigned long start, int nr_pages, unsigned int foll_flags,
-		     struct page **pages, struct vm_area_struct **vmas,
-		     int *retry)
+long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+		      unsigned long start, unsigned long nr_pages,
+		      unsigned int foll_flags, struct page **pages,
+		      struct vm_area_struct **vmas, int *nonblocking)
 {
 	struct vm_area_struct *vma;
 	unsigned long vm_flags;
@@ -190,9 +190,10 @@ finish_or_fault:
  *   slab page or a secondary page from a compound page
  * - don't permit access to VMAs that don't support it, such as I/O mappings
  */
-int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-	unsigned long start, int nr_pages, int write, int force,
-	struct page **pages, struct vm_area_struct **vmas)
+long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+		    unsigned long start, unsigned long nr_pages,
+		    int write, int force, struct page **pages,
+		    struct vm_area_struct **vmas)
 {
 	int flags = 0;
 