Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86: fix microcode driver newly spewing warnings
  x86, PAT: Remove page granularity tracking for vm_insert_pfn maps
  x86: disable X86_PTRACE_BTS for now
  x86, documentation: kernel-parameters replace X86-32,X86-64 with X86
  x86: pci-swiotlb.c swiotlb_dma_ops should be static
  x86, PAT: Remove duplicate memtype reserve in devmem mmap
  x86, PAT: Consolidate code in pat_x_mtrr_type() and reserve_memtype()
  x86, PAT: Changing memtype to WC ensuring no WB alias
  x86, PAT: Handle faults cleanly in set_memory_ APIs
  x86, PAT: Change order of cpa and free in set_memory_wb
  x86, CPA: Change idmap attribute before ioremap attribute setup
arch/x86/mm/ioremap.c:

@@ -280,15 +280,16 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 		return NULL;
 	area->phys_addr = phys_addr;
 	vaddr = (unsigned long) area->addr;
-	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
+
+	if (kernel_map_sync_memtype(phys_addr, size, prot_val)) {
 		free_memtype(phys_addr, phys_addr + size);
 		free_vm_area(area);
 		return NULL;
 	}
 
-	if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
+	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
 		free_memtype(phys_addr, phys_addr + size);
-		vunmap(area->addr);
+		free_vm_area(area);
 		return NULL;
 	}
 
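The reordered hunk above fixes the kernel identity-map attribute (kernel_map_sync_memtype) before populating the ioremap mapping itself, and on either failure unwinds everything acquired so far. A minimal user-space C sketch of that staged setup/unwind shape; all function names here are hypothetical stand-ins, only the control flow mirrors the kernel code:

#include <stdio.h>

/* Hypothetical stand-ins for reserve_memtype(), get_vm_area(),
 * kernel_map_sync_memtype() and ioremap_page_range(). */
static int reserve(void)    { puts("reserve memtype");   return 0; }
static int get_area(void)   { puts("get vm area");       return 0; }
static int sync_idmap(void) { puts("sync identity map"); return 0; }
static int map_pages(void)  { puts("map pages");         return -1; }

static void unreserve(void) { puts("free memtype"); }
static void put_area(void)  { puts("free vm area"); }

static void *do_ioremap(void)
{
	static int dummy;

	if (reserve())
		return NULL;
	if (get_area())
		goto err_reserve;
	/* Sync the identity-map attribute *before* creating the new
	 * mapping, mirroring the reordering in the hunk above. */
	if (sync_idmap())
		goto err_area;
	if (map_pages())
		goto err_area;
	return &dummy;

err_area:
	put_area();
err_reserve:
	unreserve();
	return NULL;
}

int main(void)
{
	if (!do_ioremap())
		puts("failed: partial state fully unwound");
	return 0;
}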
@@ -374,7 +375,8 @@ static void __iomem *ioremap_default(resource_size_t phys_addr,
 	 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
 	 * - Inherit from conflicting mappings otherwise
 	 */
-	err = reserve_memtype(phys_addr, phys_addr + size, -1, &flags);
+	err = reserve_memtype(phys_addr, phys_addr + size,
+				_PAGE_CACHE_WB, &flags);
 	if (err < 0)
 		return NULL;
 
arch/x86/mm/pageattr.c:

@@ -945,71 +945,94 @@ int _set_memory_uc(unsigned long addr, int numpages)
 
 int set_memory_uc(unsigned long addr, int numpages)
 {
+	int ret;
+
 	/*
 	 * for now UC MINUS. see comments in ioremap_nocache()
 	 */
-	if (reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
-			    _PAGE_CACHE_UC_MINUS, NULL))
-		return -EINVAL;
+	ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
+			    _PAGE_CACHE_UC_MINUS, NULL);
+	if (ret)
+		goto out_err;
 
-	return _set_memory_uc(addr, numpages);
+	ret = _set_memory_uc(addr, numpages);
+	if (ret)
+		goto out_free;
+
+	return 0;
+
+out_free:
+	free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
+out_err:
+	return ret;
 }
 EXPORT_SYMBOL(set_memory_uc);
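The rewritten set_memory_uc() propagates the real error code from reserve_memtype() instead of flattening it to -EINVAL, and frees the reservation if the attribute change itself fails. A compact, self-contained C rendering of the out_free/out_err ladder, with hypothetical helper names:

#include <errno.h>
#include <stdio.h>

static int reserve_tracking(void)  { return 0; }       /* claim state */
static void release_tracking(void) { puts("rolled back reservation"); }
static int apply_attribute(void)   { return -EBUSY; }  /* simulate CPA failure */

static int set_attribute(void)
{
	int ret;

	ret = reserve_tracking();
	if (ret)
		goto out_err;		/* nothing acquired yet */

	ret = apply_attribute();
	if (ret)
		goto out_free;		/* undo the reservation */

	return 0;

out_free:
	release_tracking();
out_err:
	return ret;			/* caller sees the real error code */
}

int main(void)
{
	printf("set_attribute() = %d\n", set_attribute());
	return 0;
}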
 
 int set_memory_array_uc(unsigned long *addr, int addrinarray)
 {
-	unsigned long start;
-	unsigned long end;
-	int i;
+	int i, j;
+	int ret;
 
 	/*
 	 * for now UC MINUS. see comments in ioremap_nocache()
 	 */
 	for (i = 0; i < addrinarray; i++) {
-		start = __pa(addr[i]);
-		for (end = start + PAGE_SIZE; i < addrinarray - 1; end += PAGE_SIZE) {
-			if (end != __pa(addr[i + 1]))
-				break;
-			i++;
-		}
-		if (reserve_memtype(start, end, _PAGE_CACHE_UC_MINUS, NULL))
-			goto out;
+		ret = reserve_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE,
+					_PAGE_CACHE_UC_MINUS, NULL);
+		if (ret)
+			goto out_free;
 	}
 
-	return change_page_attr_set(addr, addrinarray,
+	ret = change_page_attr_set(addr, addrinarray,
 				__pgprot(_PAGE_CACHE_UC_MINUS), 1);
-out:
-	for (i = 0; i < addrinarray; i++) {
-		unsigned long tmp = __pa(addr[i]);
-
-		if (tmp == start)
-			break;
-		for (end = tmp + PAGE_SIZE; i < addrinarray - 1; end += PAGE_SIZE) {
-			if (end != __pa(addr[i + 1]))
-				break;
-			i++;
-		}
-		free_memtype(tmp, end);
-	}
-	return -EINVAL;
+	if (ret)
+		goto out_free;
+
+	return 0;
+
+out_free:
+	for (j = 0; j < i; j++)
+		free_memtype(__pa(addr[j]), __pa(addr[j]) + PAGE_SIZE);
+
+	return ret;
 }
 EXPORT_SYMBOL(set_memory_array_uc);
 
 int _set_memory_wc(unsigned long addr, int numpages)
 {
-	return change_page_attr_set(&addr, numpages,
-				    __pgprot(_PAGE_CACHE_WC), 0);
+	int ret;
+	ret = change_page_attr_set(&addr, numpages,
+				    __pgprot(_PAGE_CACHE_UC_MINUS), 0);
+
+	if (!ret) {
+		ret = change_page_attr_set(&addr, numpages,
+				    __pgprot(_PAGE_CACHE_WC), 0);
+	}
+	return ret;
 }
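_set_memory_wc() now steps WB pages down to UC_MINUS before promoting them to WC, so the page tables never transiently alias a range as both WB and WC (the point of the "Changing memtype to WC ensuring no WB alias" commit in the merge list). A toy C model of the two-phase transition; the enum and helpers are stand-ins, not kernel API:

#include <stdio.h>

enum cache_type { WB, UC_MINUS, WC };	/* stand-ins for _PAGE_CACHE_* */

static enum cache_type pte_type = WB;

/* Stands in for change_page_attr_set() on a single range. */
static int change_type(enum cache_type t)
{
	pte_type = t;
	return 0;
}

static int make_wc(void)
{
	int ret;

	/* Step 1: demote to UC_MINUS, which is safe next to a WB alias. */
	ret = change_type(UC_MINUS);
	/* Step 2: only then promote to WC. */
	if (!ret)
		ret = change_type(WC);
	return ret;
}

int main(void)
{
	if (!make_wc())
		printf("final type: %s\n", pte_type == WC ? "WC" : "other");
	return 0;
}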
 
 int set_memory_wc(unsigned long addr, int numpages)
 {
+	int ret;
+
 	if (!pat_enabled)
 		return set_memory_uc(addr, numpages);
 
-	if (reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
-		_PAGE_CACHE_WC, NULL))
-		return -EINVAL;
+	ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
+		_PAGE_CACHE_WC, NULL);
+	if (ret)
+		goto out_err;
 
-	return _set_memory_wc(addr, numpages);
+	ret = _set_memory_wc(addr, numpages);
+	if (ret)
+		goto out_free;
+
+	return 0;
+
+out_free:
+	free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
+out_err:
+	return ret;
 }
 EXPORT_SYMBOL(set_memory_wc);
@@ -1021,29 +1044,31 @@ int _set_memory_wb(unsigned long addr, int numpages)
 
 int set_memory_wb(unsigned long addr, int numpages)
 {
-	free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
+	int ret;
 
-	return _set_memory_wb(addr, numpages);
+	ret = _set_memory_wb(addr, numpages);
+	if (ret)
+		return ret;
+
+	free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
+	return 0;
 }
 EXPORT_SYMBOL(set_memory_wb);
 
 int set_memory_array_wb(unsigned long *addr, int addrinarray)
 {
 	int i;
+	int ret;
 
-	for (i = 0; i < addrinarray; i++) {
-		unsigned long start = __pa(addr[i]);
-		unsigned long end;
-
-		for (end = start + PAGE_SIZE; i < addrinarray - 1; end += PAGE_SIZE) {
-			if (end != __pa(addr[i + 1]))
-				break;
-			i++;
-		}
-		free_memtype(start, end);
-	}
-	return change_page_attr_clear(addr, addrinarray,
+	ret = change_page_attr_clear(addr, addrinarray,
 				__pgprot(_PAGE_CACHE_MASK), 1);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < addrinarray; i++)
+		free_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE);
+
+	return 0;
 }
 EXPORT_SYMBOL(set_memory_array_wb);
@@ -1136,6 +1161,8 @@ int set_pages_array_wb(struct page **pages, int addrinarray)
 
 	retval = cpa_clear_pages_array(pages, addrinarray,
 			__pgprot(_PAGE_CACHE_MASK));
+	if (retval)
+		return retval;
 
 	for (i = 0; i < addrinarray; i++) {
 		start = (unsigned long)page_address(pages[i]);
@@ -1143,7 +1170,7 @@ int set_pages_array_wb(struct page **pages, int addrinarray)
 		free_memtype(start, end);
 	}
 
-	return retval;
+	return 0;
 }
 EXPORT_SYMBOL(set_pages_array_wb);
arch/x86/mm/pat.c:

@@ -182,10 +182,10 @@ static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
 		u8 mtrr_type;
 
 		mtrr_type = mtrr_type_lookup(start, end);
-		if (mtrr_type == MTRR_TYPE_UNCACHABLE)
-			return _PAGE_CACHE_UC;
-		if (mtrr_type == MTRR_TYPE_WRCOMB)
-			return _PAGE_CACHE_WC;
+		if (mtrr_type != MTRR_TYPE_WRBACK)
+			return _PAGE_CACHE_UC_MINUS;
+
+		return _PAGE_CACHE_WB;
 	}
 
 	return req_type;
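After this consolidation, pat_x_mtrr_type() treats every non-write-back MTRR hint the same way: a WB request is honoured only where MTRR actually allows write-back and is degraded to UC_MINUS otherwise, while non-WB requests pass through untouched. A standalone C restatement of that decision for illustration only; the constants are stand-ins for _PAGE_CACHE_* and MTRR_TYPE_*:

#include <stdio.h>

enum pat_type  { PAT_UC_MINUS, PAT_WC, PAT_WB };
enum mtrr_type { MTRR_WRBACK, MTRR_WRCOMB, MTRR_UNCACHABLE };

static enum pat_type effective_type(enum pat_type req, enum mtrr_type mtrr)
{
	/* Only a WB request consults the MTRR hint, as in the new code. */
	if (req == PAT_WB)
		return (mtrr == MTRR_WRBACK) ? PAT_WB : PAT_UC_MINUS;
	return req;	/* non-WB requests pass through */
}

int main(void)
{
	/* WB over an uncachable MTRR range degrades to UC_MINUS. */
	printf("%d\n", effective_type(PAT_WB, MTRR_UNCACHABLE));
	/* A WC request is passed through regardless of MTRR. */
	printf("%d\n", effective_type(PAT_WC, MTRR_UNCACHABLE));
	return 0;
}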
@@ -352,23 +352,13 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 		return 0;
 	}
 
-	if (req_type == -1) {
-		/*
-		 * Call mtrr_lookup to get the type hint. This is an
-		 * optimization for /dev/mem mmap'ers into WB memory (BIOS
-		 * tools and ACPI tools). Use WB request for WB memory and use
-		 * UC_MINUS otherwise.
-		 */
-		u8 mtrr_type = mtrr_type_lookup(start, end);
-
-		if (mtrr_type == MTRR_TYPE_WRBACK)
-			actual_type = _PAGE_CACHE_WB;
-		else
-			actual_type = _PAGE_CACHE_UC_MINUS;
-	} else {
-		actual_type = pat_x_mtrr_type(start, end,
-					      req_type & _PAGE_CACHE_MASK);
-	}
+	/*
+	 * Call mtrr_lookup to get the type hint. This is an
+	 * optimization for /dev/mem mmap'ers into WB memory (BIOS
+	 * tools and ACPI tools). Use WB request for WB memory and use
+	 * UC_MINUS otherwise.
+	 */
+	actual_type = pat_x_mtrr_type(start, end, req_type & _PAGE_CACHE_MASK);
 
 	if (new_type)
 		*new_type = actual_type;
@@ -546,9 +536,7 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 				unsigned long size, pgprot_t *vma_prot)
 {
-	u64 offset = ((u64) pfn) << PAGE_SHIFT;
-	unsigned long flags = -1;
-	int retval;
+	unsigned long flags = _PAGE_CACHE_WB;
 
 	if (!range_is_allowed(pfn, size))
 		return 0;
@@ -576,64 +564,11 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 	}
 #endif
 
-	/*
-	 * With O_SYNC, we can only take UC_MINUS mapping. Fail if we cannot.
-	 *
-	 * Without O_SYNC, we want to get
-	 * - WB for WB-able memory and no other conflicting mappings
-	 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
-	 * - Inherit from conflicting mappings otherwise
-	 */
-	if (flags != -1) {
-		retval = reserve_memtype(offset, offset + size, flags, NULL);
-	} else {
-		retval = reserve_memtype(offset, offset + size, -1, &flags);
-	}
-
-	if (retval < 0)
-		return 0;
-
-	if (((pfn < max_low_pfn_mapped) ||
-	     (pfn >= (1UL<<(32 - PAGE_SHIFT)) && pfn < max_pfn_mapped)) &&
-	    ioremap_change_attr((unsigned long)__va(offset), size, flags) < 0) {
-		free_memtype(offset, offset + size);
-		printk(KERN_INFO
-		"%s:%d /dev/mem ioremap_change_attr failed %s for %Lx-%Lx\n",
-			current->comm, current->pid,
-			cattr_name(flags),
-			offset, (unsigned long long)(offset + size));
-		return 0;
-	}
-
 	*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
 			     flags);
 	return 1;
 }
 
-void map_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
-{
-	unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK);
-	u64 addr = (u64)pfn << PAGE_SHIFT;
-	unsigned long flags;
-
-	reserve_memtype(addr, addr + size, want_flags, &flags);
-	if (flags != want_flags) {
-		printk(KERN_INFO
-		"%s:%d /dev/mem expected mapping type %s for %Lx-%Lx, got %s\n",
-			current->comm, current->pid,
-			cattr_name(want_flags),
-			addr, (unsigned long long)(addr + size),
-			cattr_name(flags));
-	}
-}
-
-void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
-{
-	u64 addr = (u64)pfn << PAGE_SHIFT;
-
-	free_memtype(addr, addr + size);
-}
-
 /*
  * Change the memory type for the physical address range in kernel identity
  * mapping space if that range is a part of identity map.
@@ -671,8 +606,8 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
 {
 	int is_ram = 0;
 	int ret;
-	unsigned long flags;
 	unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
+	unsigned long flags = want_flags;
 
 	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
@@ -734,29 +669,28 @@ static void free_pfn_range(u64 paddr, unsigned long size)
  *
  * If the vma has a linear pfn mapping for the entire range, we get the prot
  * from pte and reserve the entire vma range with single reserve_pfn_range call.
- * Otherwise, we reserve the entire vma range, by going through the PTEs page
- * by page to get physical address and protection.
  */
 int track_pfn_vma_copy(struct vm_area_struct *vma)
 {
-	int retval = 0;
-	unsigned long i, j;
 	resource_size_t paddr;
 	unsigned long prot;
-	unsigned long vma_start = vma->vm_start;
-	unsigned long vma_end = vma->vm_end;
-	unsigned long vma_size = vma_end - vma_start;
+	unsigned long vma_size = vma->vm_end - vma->vm_start;
 	pgprot_t pgprot;
 
 	if (!pat_enabled)
 		return 0;
 
+	/*
+	 * For now, only handle remap_pfn_range() vmas where
+	 * is_linear_pfn_mapping() == TRUE. Handling of
+	 * vm_insert_pfn() is TBD.
+	 */
 	if (is_linear_pfn_mapping(vma)) {
 		/*
 		 * reserve the whole chunk covered by vma. We need the
 		 * starting address and protection from pte.
 		 */
-		if (follow_phys(vma, vma_start, 0, &prot, &paddr)) {
+		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
 			WARN_ON_ONCE(1);
 			return -EINVAL;
 		}
@@ -764,28 +698,7 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 		return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
 	}
 
-	/* reserve entire vma page by page, using pfn and prot from pte */
-	for (i = 0; i < vma_size; i += PAGE_SIZE) {
-		if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
-			continue;
-
-		pgprot = __pgprot(prot);
-		retval = reserve_pfn_range(paddr, PAGE_SIZE, &pgprot, 1);
-		if (retval)
-			goto cleanup_ret;
-	}
 	return 0;
-
-cleanup_ret:
-	/* Reserve error: Cleanup partial reservation and return error */
-	for (j = 0; j < i; j += PAGE_SIZE) {
-		if (follow_phys(vma, vma_start + j, 0, &prot, &paddr))
-			continue;
-
-		free_pfn_range(paddr, PAGE_SIZE);
-	}
-
-	return retval;
 }
 
 /*
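With the page-granularity path removed, PAT tracking now covers only vmas for which is_linear_pfn_mapping() is true, i.e. vmas whose entire range was mapped from one contiguous pfn range at mmap time; vm_insert_pfn() users are deliberately left untracked for now. For reference, a hedged sketch of the driver side that produces such a vma: remap_pfn_range() is the real kernel API, while my_mmap and my_device_phys are hypothetical:

#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical physical base address of a device aperture. */
static unsigned long my_device_phys;

static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	/* Mapping the whole vma from one contiguous pfn range at mmap
	 * time marks it as a linear pfn mapping, so the PAT code above
	 * can reserve and free it with a single reserve_pfn_range()
	 * call instead of walking PTEs page by page. */
	return remap_pfn_range(vma, vma->vm_start,
			       (my_device_phys >> PAGE_SHIFT) + vma->vm_pgoff,
			       size, vma->vm_page_prot);
}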
@@ -795,50 +708,28 @@ cleanup_ret:
  * prot is passed in as a parameter for the new mapping. If the vma has a
  * linear pfn mapping for the entire range reserve the entire vma range with
  * single reserve_pfn_range call.
- * Otherwise, we look at the pfn and size and reserve only the specified range
- * page by page.
- *
- * Note that this function can be called with caller trying to map only a
- * subrange/page inside the vma.
  */
 int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
 			unsigned long pfn, unsigned long size)
 {
-	int retval = 0;
-	unsigned long i, j;
-	resource_size_t base_paddr;
 	resource_size_t paddr;
-	unsigned long vma_start = vma->vm_start;
-	unsigned long vma_end = vma->vm_end;
-	unsigned long vma_size = vma_end - vma_start;
+	unsigned long vma_size = vma->vm_end - vma->vm_start;
 
 	if (!pat_enabled)
 		return 0;
 
+	/*
+	 * For now, only handle remap_pfn_range() vmas where
+	 * is_linear_pfn_mapping() == TRUE. Handling of
+	 * vm_insert_pfn() is TBD.
+	 */
 	if (is_linear_pfn_mapping(vma)) {
 		/* reserve the whole chunk starting from vm_pgoff */
 		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
 		return reserve_pfn_range(paddr, vma_size, prot, 0);
 	}
 
-	/* reserve page by page using pfn and size */
-	base_paddr = (resource_size_t)pfn << PAGE_SHIFT;
-	for (i = 0; i < size; i += PAGE_SIZE) {
-		paddr = base_paddr + i;
-		retval = reserve_pfn_range(paddr, PAGE_SIZE, prot, 0);
-		if (retval)
-			goto cleanup_ret;
-	}
 	return 0;
-
-cleanup_ret:
-	/* Reserve error: Cleanup partial reservation and return error */
-	for (j = 0; j < i; j += PAGE_SIZE) {
-		paddr = base_paddr + j;
-		free_pfn_range(paddr, PAGE_SIZE);
-	}
-
-	return retval;
 }
 
 /*
@@ -849,39 +740,23 @@ cleanup_ret:
 void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
 			unsigned long size)
 {
-	unsigned long i;
 	resource_size_t paddr;
-	unsigned long prot;
-	unsigned long vma_start = vma->vm_start;
-	unsigned long vma_end = vma->vm_end;
-	unsigned long vma_size = vma_end - vma_start;
+	unsigned long vma_size = vma->vm_end - vma->vm_start;
 
 	if (!pat_enabled)
 		return;
 
+	/*
+	 * For now, only handle remap_pfn_range() vmas where
+	 * is_linear_pfn_mapping() == TRUE. Handling of
+	 * vm_insert_pfn() is TBD.
+	 */
 	if (is_linear_pfn_mapping(vma)) {
 		/* free the whole chunk starting from vm_pgoff */
 		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
 		free_pfn_range(paddr, vma_size);
 		return;
 	}
-
-	if (size != 0 && size != vma_size) {
-		/* free page by page, using pfn and size */
-		paddr = (resource_size_t)pfn << PAGE_SHIFT;
-		for (i = 0; i < size; i += PAGE_SIZE) {
-			paddr = paddr + i;
-			free_pfn_range(paddr, PAGE_SIZE);
-		}
-	} else {
-		/* free entire vma, page by page, using the pfn from pte */
-		for (i = 0; i < vma_size; i += PAGE_SIZE) {
-			if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
-				continue;
-
-			free_pfn_range(paddr, PAGE_SIZE);
-		}
-	}
 }
 
 pgprot_t pgprot_writecombine(pgprot_t prot)