mm: re-architect the VM_UNPAGED logic
This replaces the (in my opinion horrible) VM_UNMAPPED logic with very explicit support for a "remapped page range" aka VM_PFNMAP. It allows a VM area to contain an arbitrary range of page table entries that the VM never touches, and never considers to be normal pages.

Any user of "remap_pfn_range()" automatically gets this new functionality, and doesn't even have to mark the pages reserved or indeed mark them any other way. It just works. As a side effect, doing mmap() on /dev/mem works for arbitrary ranges.

Sparc update from David in the next commit.

Signed-off-by: Linus Torvalds <torvalds@osdl.org>
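For context, here is a minimal sketch (not part of the commit) of the kind of driver code that benefits. A character device's mmap handler hands a physical window to userspace with remap_pfn_range(); the handler name mydev_mmap and the MYDEV_PHYS_BASE address are hypothetical. With this change, remap_pfn_range() marks the resulting VMA as a remapped (VM_PFNMAP) range, so the driver no longer has to mark the backing pages reserved to keep the VM away from them.

#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical device window; the physical address is made up. */
#define MYDEV_PHYS_BASE 0xfd000000UL

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
        unsigned long size = vma->vm_end - vma->vm_start;

        /*
         * remap_pfn_range() installs raw page-frame mappings and, as of
         * this commit, flags the VMA as a remapped range (VM_PFNMAP)
         * that the core VM never treats as normal pages.
         */
        return remap_pfn_range(vma, vma->vm_start,
                               MYDEV_PHYS_BASE >> PAGE_SHIFT,
                               size, vma->vm_page_prot);
}

The same mechanism is what makes mmap() of /dev/mem work for arbitrary ranges: the /dev/mem mmap handler is itself a remap_pfn_range() user, so its mappings are now ordinary VM_PFNMAP areas.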
mm/msync.c (12 lines changed)
@@ -27,7 +27,6 @@ static void msync_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 again:
         pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
         do {
-                unsigned long pfn;
                 struct page *page;
 
                 if (progress >= 64) {
@@ -40,13 +39,9 @@ again:
                         continue;
                 if (!pte_maybe_dirty(*pte))
                         continue;
-                pfn = pte_pfn(*pte);
-                if (unlikely(!pfn_valid(pfn))) {
-                        print_bad_pte(vma, *pte, addr);
+                page = vm_normal_page(vma, addr, *pte);
+                if (!page)
                         continue;
-                }
-                page = pfn_to_page(pfn);
-
                 if (ptep_clear_flush_dirty(vma, addr, pte) ||
                     page_test_and_clear_dirty(page))
                         set_page_dirty(page);
@@ -97,9 +92,8 @@ static void msync_page_range(struct vm_area_struct *vma,
         /* For hugepages we can't go walking the page table normally,
          * but that's ok, hugetlbfs is memory based, so we don't need
          * to do anything more on an msync().
-         * Can't do anything with VM_UNPAGED regions either.
          */
-        if (vma->vm_flags & (VM_HUGETLB|VM_UNPAGED))
+        if (vma->vm_flags & VM_HUGETLB)
                 return;
 
         BUG_ON(addr >= end);
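The vm_normal_page() helper called above is introduced elsewhere in this commit (in mm/memory.c); it is what lets msync_pte_range() drop the open-coded pfn_valid()/print_bad_pte() handling and lets msync_page_range() stop special-casing VM_UNPAGED. Roughly, and simplified here rather than copied from the commit (the real helper is more careful about which entries inside a VM_PFNMAP area count as remapped), it behaves like this sketch:

#include <linux/mm.h>

/* Simplified sketch of the vm_normal_page() idea, not the commit's code. */
static struct page *normal_page_sketch(struct vm_area_struct *vma,
                                       unsigned long addr, pte_t pte)
{
        unsigned long pfn = pte_pfn(pte);

        /* Remapped page range: raw pfn mappings the VM must not touch. */
        if (vma->vm_flags & VM_PFNMAP)
                return NULL;

        /* A pte pointing outside valid memory is corrupt: complain, skip. */
        if (unlikely(!pfn_valid(pfn))) {
                print_bad_pte(vma, pte, addr);
                return NULL;
        }

        /* Normal page: callers may dirty it, write it back, and so on. */
        return pfn_to_page(pfn);
}

Callers such as the msync loop above then reduce to "page = vm_normal_page(...); if (!page) continue;", which is exactly the shape of the new code in the second hunk.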