x86, pat: Lookup the protection from memtype list on vm_insert_pfn()
Lookup the reserved memtype during vm_insert_pfn() and use that memtype for the new mapping. This takes care of handling the vm_insert_pfn() interface in track_pfn_vma*/untrack_pfn_vma.

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
commit 1087637616
parent 637b86e75f
committed by H. Peter Anvin
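In essence, the new path in track_pfn_vma_new() looks up the memtype that was reserved for the physical address and splices it into the vma's page protection. A minimal sketch of that derivation, based on the hunk below (pat_prot_for_pfn is a hypothetical helper name, not part of the patch):

/* Sketch only: derive the protection for a vm_insert_pfn() mapping.
 * Assumes lookup_memtype() returns the _PAGE_CACHE_* flags reserved
 * for this physical address, as the hunk below does.
 */
static pgprot_t pat_prot_for_pfn(struct vm_area_struct *vma, unsigned long pfn)
{
	unsigned long flags = lookup_memtype(pfn << PAGE_SHIFT);

	/* Keep every bit of vm_page_prot except the cache-attribute
	 * bits, which are replaced by the looked-up memtype. */
	return __pgprot((pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK) |
			flags);
}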
@@ -848,11 +848,6 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 	unsigned long vma_size = vma->vm_end - vma->vm_start;
 	pgprot_t pgprot;
 
-	/*
-	 * For now, only handle remap_pfn_range() vmas where
-	 * is_linear_pfn_mapping() == TRUE. Handling of
-	 * vm_insert_pfn() is TBD.
-	 */
 	if (is_linear_pfn_mapping(vma)) {
 		/*
 		 * reserve the whole chunk covered by vma. We need the
@@ -880,20 +875,24 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
 			unsigned long pfn, unsigned long size)
 {
+	unsigned long flags;
 	resource_size_t paddr;
 	unsigned long vma_size = vma->vm_end - vma->vm_start;
 
-	/*
-	 * For now, only handle remap_pfn_range() vmas where
-	 * is_linear_pfn_mapping() == TRUE. Handling of
-	 * vm_insert_pfn() is TBD.
-	 */
 	if (is_linear_pfn_mapping(vma)) {
 		/* reserve the whole chunk starting from vm_pgoff */
 		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
 		return reserve_pfn_range(paddr, vma_size, prot, 0);
 	}
 
+	if (!pat_enabled)
+		return 0;
+
+	/* for vm_insert_pfn and friends, we set prot based on lookup */
+	flags = lookup_memtype(pfn << PAGE_SHIFT);
+	*prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
+			 flags);
+
 	return 0;
 }
 
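For callers, the API is unchanged; a vm_insert_pfn() user simply gets cache attributes consistent with the reserved memtype. A hypothetical fault handler of this kernel era might look like the following sketch (example_fault and example_resolve_pfn are illustrative names, not from this patch):

/* Hypothetical driver fault handler, sketched against the fault/
 * vm_insert_pfn() interfaces this patch targets.
 */
static int example_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long pfn = example_resolve_pfn(vma, vmf);	/* assumed helper */

	/* vm_insert_pfn() reaches track_pfn_vma_new(), which now fills
	 * in the protection from the reserved-memtype lookup above. */
	if (vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn))
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}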
@@ -908,11 +907,6 @@ void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
 	resource_size_t paddr;
 	unsigned long vma_size = vma->vm_end - vma->vm_start;
 
-	/*
-	 * For now, only handle remap_pfn_range() vmas where
-	 * is_linear_pfn_mapping() == TRUE. Handling of
-	 * vm_insert_pfn() is TBD.
-	 */
 	if (is_linear_pfn_mapping(vma)) {
 		/* free the whole chunk starting from vm_pgoff */
 		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;