[PATCH] Fix handling spurious page fault for hugetlb region
This reverts commit 3359b54c8c and replaces it with a cleaner version that is
purely based on page table operations, so that the synchronization between
inode size and hugetlb mappings becomes moot.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
---
 mm/memory.c | 14 ++------------
 1 file changed, 2 insertions(+), 12 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2045,18 +2045,8 @@ int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma,
 
 	inc_page_state(pgfault);
 
-	if (unlikely(is_vm_hugetlb_page(vma))) {
-		if (valid_hugetlb_file_off(vma, address))
-			/* We get here only if there was a stale(zero) TLB entry
-			 * (because of HW prefetching).
-			 * Low-level arch code (if needed) should have already
-			 * purged the stale entry as part of this fault handling.
-			 * Here we just return.
-			 */
-			return VM_FAULT_MINOR;
-		else
-			return VM_FAULT_SIGBUS; /* mapping truncation does this. */
-	}
+	if (unlikely(is_vm_hugetlb_page(vma)))
+		return hugetlb_fault(mm, vma, address, write_access);
 
 	/*
 	 * We need the page table lock to synchronize with kswapd
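For context, the call added above hands hugetlb faults to a handler that works
purely on the huge page table entries. The following is a minimal sketch of
what such a page-table-based handler might look like; it is not the code
introduced by this patch, and huge_pte_alloc() and hugetlb_no_page() are used
here only as illustrative names for "look up/allocate the huge PTE slot" and
"populate a missing huge page".

	/*
	 * Illustrative sketch only -- not this patch's implementation.
	 * The idea: resolve the fault from the page tables under
	 * mm->page_table_lock, so no separate check against the inode
	 * size is needed; truncation shows up as a cleared huge PTE.
	 */
	int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			  unsigned long address, int write_access)
	{
		pte_t *ptep;
		int ret = VM_FAULT_MINOR;

		spin_lock(&mm->page_table_lock);

		/* Find (or create) the page-table slot covering this huge page. */
		ptep = huge_pte_alloc(mm, address & HPAGE_MASK);	/* assumed helper */
		if (!ptep) {
			ret = VM_FAULT_OOM;
			goto out;
		}

		/*
		 * If the entry is empty, instantiate the huge page; otherwise
		 * the fault was spurious (e.g. a stale TLB entry) and
		 * returning VM_FAULT_MINOR is enough.
		 */
		if (pte_none(*ptep))
			ret = hugetlb_no_page(mm, vma, address, ptep);	/* assumed helper */
	out:
		spin_unlock(&mm->page_table_lock);
		return ret;
	}

With a scheme along these lines, a fault against a truncated region simply
finds no backing page in the page table and can fail from the no-page path,
which is why the old valid_hugetlb_file_off() check against the inode size
becomes unnecessary.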