x86 PAT: change track_pfn_vma_new to take pgprot_t pointer param
Impact: cleanup

Change the protection parameter for track_pfn_vma_new() into a pgprot_t
pointer. A subsequent patch changes the x86 PAT handling to return a
compatible memtype in that pgprot_t if what was requested cannot be
allowed due to conflicts. No functionality change in this patch.

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit e4b866ed19
parent afc7d20c84
committed by Ingo Molnar
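In miniature, the new contract this patch establishes (an illustrative sketch mirroring the vm_insert_pfn() hunk below, not additional code from the patch): the caller passes a pgprot_t it owns, and the tracking hook may rewrite it before the mapping is built.

	pgprot_t pgprot = vma->vm_page_prot;	/* local, writable copy */

	/* track_pfn_vma_new() may now adjust *prot; everything after this
	 * point must use the copy, not vma->vm_page_prot */
	if (track_pfn_vma_new(vma, &pgprot, pfn, PAGE_SIZE))
		return -EINVAL;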
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -741,7 +741,7 @@ cleanup_ret:
  * Note that this function can be called with caller trying to map only a
  * subrange/page inside the vma.
  */
-int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
+int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
 			unsigned long pfn, unsigned long size)
 {
 	int retval = 0;
@@ -758,14 +758,14 @@ int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
 	if (is_linear_pfn_mapping(vma)) {
 		/* reserve the whole chunk starting from vm_pgoff */
 		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
-		return reserve_pfn_range(paddr, vma_size, prot);
+		return reserve_pfn_range(paddr, vma_size, *prot);
 	}

 	/* reserve page by page using pfn and size */
 	base_paddr = (resource_size_t)pfn << PAGE_SHIFT;
 	for (i = 0; i < size; i += PAGE_SIZE) {
 		paddr = base_paddr + i;
-		retval = reserve_pfn_range(paddr, PAGE_SIZE, prot);
+		retval = reserve_pfn_range(paddr, PAGE_SIZE, *prot);
 		if (retval)
 			goto cleanup_ret;
 	}
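The reason for passing a pointer only pays off in the follow-up PAT patch. One plausible shape of that change, as a hedged sketch (assuming reserve_pfn_range() also grows a pgprot_t * parameter, and that want_flags/flags hold the requested and already-established memtypes, per the x86 PAT code of this era):

	if (flags != want_flags) {
		/* the request conflicts with an existing mapping: write the
		 * compatible memtype back to the caller instead of failing */
		*prot = __pgprot((pgprot_val(*prot) & ~_PAGE_CACHE_MASK) |
				 flags);
	}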
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -301,7 +301,7 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm,
  * track_pfn_vma_new is called when a _new_ pfn mapping is being established
  * for physical range indicated by pfn and size.
  */
-static inline int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
+static inline int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
 			unsigned long pfn, unsigned long size)
 {
 	return 0;
@@ -332,7 +332,7 @@ static inline void untrack_pfn_vma(struct vm_area_struct *vma,
 {
 }
 #else
-extern int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
+extern int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
 			unsigned long pfn, unsigned long size);
 extern int track_pfn_vma_copy(struct vm_area_struct *vma);
 extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
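For orientation, the asm-generic wrapper being edited here follows the usual stub-versus-extern pattern. Abridged sketch below; the __HAVE_PFNMAP_TRACKING guard is my recollection of the file from this era, not something shown in the hunks:

#ifndef __HAVE_PFNMAP_TRACKING
/* architectures that do not track pfn mappings get no-op inline stubs */
static inline int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
					unsigned long pfn, unsigned long size)
{
	return 0;
}
#else
/* x86 with PAT provides the real implementations declared here */
extern int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
				unsigned long pfn, unsigned long size);
#endif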
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1511,6 +1511,7 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 			unsigned long pfn)
 {
 	int ret;
+	pgprot_t pgprot = vma->vm_page_prot;
 	/*
 	 * Technically, architectures with pte_special can avoid all these
 	 * restrictions (same for remap_pfn_range). However we would like
@@ -1525,10 +1526,10 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,

 	if (addr < vma->vm_start || addr >= vma->vm_end)
 		return -EFAULT;
-	if (track_pfn_vma_new(vma, vma->vm_page_prot, pfn, PAGE_SIZE))
+	if (track_pfn_vma_new(vma, &pgprot, pfn, PAGE_SIZE))
 		return -EINVAL;

-	ret = insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+	ret = insert_pfn(vma, addr, pfn, pgprot);

 	if (ret)
 		untrack_pfn_vma(vma, pfn, PAGE_SIZE);
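A typical consumer of this path, as a hypothetical driver ->fault handler (example_fault() and example_dev_pfn() are assumed names, not kernel API): after this patch, the pte it ends up with carries the possibly PAT-adjusted protection rather than the raw vm_page_prot.

static int example_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long pfn = example_dev_pfn(vma, vmf);	/* hypothetical helper */

	/* vm_insert_pfn() reserves the pfn's memtype and then inserts the
	 * pte with the (possibly adjusted) local pgprot seen above */
	if (vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn))
		return VM_FAULT_SIGBUS;
	return VM_FAULT_NOPAGE;
}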
@@ -1671,7 +1672,7 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,

 	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;

-	err = track_pfn_vma_new(vma, prot, pfn, PAGE_ALIGN(size));
+	err = track_pfn_vma_new(vma, &prot, pfn, PAGE_ALIGN(size));
 	if (err) {
 		/*
 		 * To indicate that track_pfn related cleanup is not
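For completeness, the classic remap_pfn_range() caller, as an illustrative mmap sketch: because prot is a by-value parameter of remap_pfn_range() itself, taking &prot lets the tracking hook adjust it before the page tables are populated, with no change needed in drivers.

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* map the device range linearly; track_pfn_vma_new() may rewrite
	 * the protection inside remap_pfn_range() before the ptes are built */
	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}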