mm: re-architect the VM_UNPAGED logic
This replaces the (in my opinion horrible) VM_UNPAGED logic with very explicit support for a "remapped page range" aka VM_PFNMAP. It allows a VM area to contain an arbitrary range of page table entries that the VM never touches, and never considers to be normal pages.

Any user of "remap_pfn_range()" automatically gets this new functionality, and doesn't even have to mark the pages reserved or indeed mark them any other way. It just works. As a side effect, doing mmap() on /dev/mem works for arbitrary ranges.

Sparc update from David in the next commit.

Signed-off-by: Linus Torvalds <torvalds@osdl.org>
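For illustration, this is the usual driver-side pattern that now picks up the new VM_PFNMAP behaviour automatically. It is a minimal sketch, not code from this commit: the driver and its mydrv_mmap() handler are hypothetical, and it assumes the physical base is passed in through vma->vm_pgoff, the way /dev/mem does it.

#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical driver mmap handler: hand the whole physical range to
 * remap_pfn_range(). The vma becomes a "remapped page range"
 * (VM_PFNMAP), so the VM never treats these ptes as normal pages and
 * the driver no longer has to mark the backing pages reserved. */
static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			    size, vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}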
@@ -402,12 +402,11 @@ struct numa_maps {
 /*
  * Calculate numa node maps for a vma
  */
-static struct numa_maps *get_numa_maps(const struct vm_area_struct *vma)
+static struct numa_maps *get_numa_maps(struct vm_area_struct *vma)
 {
+	int i;
 	struct page *page;
 	unsigned long vaddr;
-	struct mm_struct *mm = vma->vm_mm;
-	int i;
 	struct numa_maps *md = kmalloc(sizeof(struct numa_maps), GFP_KERNEL);
 
 	if (!md)
@@ -420,7 +419,7 @@ static struct numa_maps *get_numa_maps(const struct vm_area_struct *vma)
 		md->node[i] =0;
 
 	for (vaddr = vma->vm_start; vaddr < vma->vm_end; vaddr += PAGE_SIZE) {
-		page = follow_page(mm, vaddr, 0);
+		page = follow_page(vma, vaddr, 0);
 		if (page) {
 			int count = page_mapcount(page);
 
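The follow_page() hunk is here because the function's first argument changed from the mm to the vma, which lets it consult per-vma state such as the new VM_PFNMAP flag. A hedged sketch of what a caller holding only an mm would now do; lookup_page() is a hypothetical helper, not from this commit:

#include <linux/mm.h>

/* Hypothetical helper: callers used to pass the mm directly; with the
 * new signature they must resolve the vma first (the mm's mmap_sem
 * must be held for find_vma()). */
static struct page *lookup_page(struct mm_struct *mm, unsigned long vaddr)
{
	struct vm_area_struct *vma = find_vma(mm, vaddr);

	/* find_vma() returns the first vma ending above vaddr; check
	 * that vaddr actually falls inside it */
	if (!vma || vaddr < vma->vm_start)
		return NULL;
	return follow_page(vma, vaddr, 0);
}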