thp: mincore transparent hugepage support
Handle transparent huge page pmd entries natively instead of splitting them into subpages. Signed-off-by: Johannes Weiner <hannes@cmpxchg.org> Signed-off-by: Andrea Arcangeli <aarcange@redhat.com> Reviewed-by: Rik van Riel <riel@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit (0ca1634d41) was authored by Johannes Weiner and committed by Linus Torvalds; its parent commit is f2d6bfe9ff.
@@ -19,6 +19,9 @@ extern struct page *follow_trans_huge_pmd(struct mm_struct *mm,
|
|||||||
extern int zap_huge_pmd(struct mmu_gather *tlb,
|
extern int zap_huge_pmd(struct mmu_gather *tlb,
|
||||||
struct vm_area_struct *vma,
|
struct vm_area_struct *vma,
|
||||||
pmd_t *pmd);
|
pmd_t *pmd);
|
||||||
|
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
|
||||||
|
unsigned long addr, unsigned long end,
|
||||||
|
unsigned char *vec);
|
||||||
|
|
||||||
enum transparent_hugepage_flag {
|
enum transparent_hugepage_flag {
|
||||||
TRANSPARENT_HUGEPAGE_FLAG,
|
TRANSPARENT_HUGEPAGE_FLAG,
|
||||||
|
@@ -923,6 +923,31 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
|
|||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
|
||||||
|
unsigned long addr, unsigned long end,
|
||||||
|
unsigned char *vec)
|
||||||
|
{
|
||||||
|
int ret = 0;
|
||||||
|
|
||||||
|
spin_lock(&vma->vm_mm->page_table_lock);
|
||||||
|
if (likely(pmd_trans_huge(*pmd))) {
|
||||||
|
ret = !pmd_trans_splitting(*pmd);
|
||||||
|
spin_unlock(&vma->vm_mm->page_table_lock);
|
||||||
|
if (unlikely(!ret))
|
||||||
|
wait_split_huge_page(vma->anon_vma, pmd);
|
||||||
|
else {
|
||||||
|
/*
|
||||||
|
* All logical pages in the range are present
|
||||||
|
* if backed by a huge page.
|
||||||
|
*/
|
||||||
|
memset(vec, 1, (end - addr) >> PAGE_SHIFT);
|
||||||
|
}
|
||||||
|
} else
|
||||||
|
spin_unlock(&vma->vm_mm->page_table_lock);
|
||||||
|
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
pmd_t *page_check_address_pmd(struct page *page,
|
pmd_t *page_check_address_pmd(struct page *page,
|
||||||
struct mm_struct *mm,
|
struct mm_struct *mm,
|
||||||
unsigned long address,
|
unsigned long address,
|
||||||
|
@@ -154,7 +154,13 @@ static void mincore_pmd_range(struct vm_area_struct *vma, pud_t *pud,
|
|||||||
pmd = pmd_offset(pud, addr);
|
pmd = pmd_offset(pud, addr);
|
||||||
do {
|
do {
|
||||||
next = pmd_addr_end(addr, end);
|
next = pmd_addr_end(addr, end);
|
||||||
split_huge_page_pmd(vma->vm_mm, pmd);
|
if (pmd_trans_huge(*pmd)) {
|
||||||
|
if (mincore_huge_pmd(vma, pmd, addr, next, vec)) {
|
||||||
|
vec += (next - addr) >> PAGE_SHIFT;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
/* fall through */
|
||||||
|
}
|
||||||
if (pmd_none_or_clear_bad(pmd))
|
if (pmd_none_or_clear_bad(pmd))
|
||||||
mincore_unmapped_range(vma, addr, next, vec);
|
mincore_unmapped_range(vma, addr, next, vec);
|
||||||
else
|
else
|
||||||
|
Reference in New Issue
Block a user