hugetlbfs: handle pages higher order than MAX_ORDER
When working with hugepages, hugetlbfs assumes that those hugepages are
smaller than MAX_ORDER.  Specifically it assumes that the mem_map is
contiguous and uses that to optimise access to the elements of the mem_map
that represent the hugepage.  Gigantic pages (such as 16GB pages on
powerpc) by definition are of greater order than MAX_ORDER (larger than
MAX_ORDER_NR_PAGES in size).  This means that we can no longer make use of
the buddy allocator guarantee for the contiguity of the mem_map, which
ensures that the mem_map is at least contiguous for maximally aligned
areas of MAX_ORDER_NR_PAGES pages.

This patch adds new mem_map accessors and iterator helpers which handle
any discontiguity at MAX_ORDER_NR_PAGES boundaries.  It then uses these to
implement gigantic page versions of copy_huge_page and clear_huge_page,
and to allow follow_hugetlb_page to handle gigantic pages.

Signed-off-by: Andy Whitcroft <apw@shadowen.org>
Cc: Jon Tollefson <kniht@linux.vnet.ibm.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: <stable@kernel.org>		[2.6.27.x]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
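The accessors and iterator the message refers to are added outside the
mm/hugetlb.c hunks shown below.  As a rough sketch of their shape,
reconstructed from the description above rather than quoted from the
patch (the mainline versions also validate the pfn under SPARSEMEM),
they would look something like:

        #include <linux/mm.h>           /* pfn_to_page(), page_to_pfn() */
        #include <linux/mmzone.h>       /* MAX_ORDER_NR_PAGES */

        /*
         * Return the mem_map entry for the 'offset'th subpage of the
         * maximally aligned page 'base'; beyond MAX_ORDER_NR_PAGES the
         * mem_map may be discontiguous, so go via the pfn.
         */
        static inline struct page *mem_map_offset(struct page *base, int offset)
        {
                if (unlikely(offset >= MAX_ORDER_NR_PAGES))
                        return pfn_to_page(page_to_pfn(base) + offset);
                return base + offset;
        }

        /*
         * Advance the iterator 'iter' to subpage 'offset' of 'base'.
         * Within a maximally aligned block a plain increment is safe;
         * on each MAX_ORDER_NR_PAGES boundary recompute from the pfn.
         */
        static inline struct page *mem_map_next(struct page *iter,
                                                struct page *base, int offset)
        {
                if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0))
                        return pfn_to_page(page_to_pfn(base) + offset);
                return iter + 1;
        }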
commit 69d177c2fc
parent 22bece00dc
committed by Linus Torvalds
 mm/hugetlb.c | 37 ++++++++++++++++++++++++++++++++++---
 1 file changed, 36 insertions(+), 1 deletion(-)
@@ -354,11 +354,26 @@ static int vma_has_reserves(struct vm_area_struct *vma)
 	return 0;
 }
 
+static void clear_gigantic_page(struct page *page,
+			unsigned long addr, unsigned long sz)
+{
+	int i;
+	struct page *p = page;
+
+	might_sleep();
+	for (i = 0; i < sz/PAGE_SIZE; i++, p = mem_map_next(p, page, i)) {
+		cond_resched();
+		clear_user_highpage(p, addr + i * PAGE_SIZE);
+	}
+}
 static void clear_huge_page(struct page *page,
 			unsigned long addr, unsigned long sz)
 {
 	int i;
 
+	if (unlikely(sz > MAX_ORDER_NR_PAGES))
+		return clear_gigantic_page(page, addr, sz);
+
 	might_sleep();
 	for (i = 0; i < sz/PAGE_SIZE; i++) {
 		cond_resched();
@@ -366,12 +381,32 @@ static void clear_huge_page(struct page *page,
 	}
 }
 
+static void copy_gigantic_page(struct page *dst, struct page *src,
+			   unsigned long addr, struct vm_area_struct *vma)
+{
+	int i;
+	struct hstate *h = hstate_vma(vma);
+	struct page *dst_base = dst;
+	struct page *src_base = src;
+	might_sleep();
+	for (i = 0; i < pages_per_huge_page(h); ) {
+		cond_resched();
+		copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
+
+		i++;
+		dst = mem_map_next(dst, dst_base, i);
+		src = mem_map_next(src, src_base, i);
+	}
+}
 static void copy_huge_page(struct page *dst, struct page *src,
 			   unsigned long addr, struct vm_area_struct *vma)
 {
 	int i;
 	struct hstate *h = hstate_vma(vma);
 
+	if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES))
+		return copy_gigantic_page(dst, src, addr, vma);
+
 	might_sleep();
 	for (i = 0; i < pages_per_huge_page(h); i++) {
 		cond_resched();
@@ -2130,7 +2165,7 @@ same_page:
 		if (zeropage_ok)
 			pages[i] = ZERO_PAGE(0);
 		else
-			pages[i] = page + pfn_offset;
+			pages[i] = mem_map_offset(page, pfn_offset);
 		get_page(pages[i]);
 	}
 
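For a sense of the scale involved, a worked example with illustrative
values not taken from the patch (4 KB base pages, MAX_ORDER = 11, so
MAX_ORDER_NR_PAGES = 1 << 10 = 1024):

        16 GB gigantic page  = 2^34 / 2^12 bytes per page = 4,194,304 base pages
        largest buddy block  = 1,024 pages                = 4 MB of memory
        blocks spanned       = 4,194,304 / 1,024          = 4,096

Only contiguity of the mem_map within each of those 4,096 maximally
aligned blocks is guaranteed, so an iteration over the gigantic page's
subpages must recompute its struct page pointer from the pfn at each of
the 4,095 interior boundaries, which is what mem_map_next() handles.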