MIPS: Remove race window in page fault handling

Multicore MIPS systems without hardware I/D cache coherency suffered from
a race condition in the page fault handler: the page table entry was
published before any pending lazy D-cache flush had been committed, so
other VPEs in the system could execute stale page cache data during that
window.
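
The window can be pictured with a purely illustrative userspace model
(not kernel code; every name below is made up for the sketch): one thread
publishes a pointer before committing the data behind it, and a second
thread, standing in for another VPE, consumes the pointer as soon as it
appears.

/* Illustrative userspace model only -- not kernel code. It mimics the
 * ordering bug: the "mapping" is published before its contents are
 * committed.  Build with: cc -pthread race-model.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int page_data;		/* 0 = stale, 1 = "flushed" */
static _Atomic(atomic_int *) pte;	/* NULL until the "PTE" is published */

static void *fault_handler(void *unused)
{
	(void)unused;
	/* Buggy order: publish the mapping first ... */
	atomic_store_explicit(&pte, &page_data, memory_order_release);
	/* ... and commit the contents (the "lazy flush") only afterwards. */
	atomic_store_explicit(&page_data, 1, memory_order_relaxed);
	return NULL;
}

static void *other_vpe(void *unused)
{
	atomic_int *p;

	(void)unused;
	/* Spin until the mapping is visible, then consume the data. */
	while (!(p = atomic_load_explicit(&pte, memory_order_acquire)))
		;
	if (atomic_load_explicit(p, memory_order_relaxed) == 0)
		puts("race hit: consumed stale data");
	else
		puts("race missed this time");
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, other_vpe, NULL);
	pthread_create(&b, NULL, fault_handler, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

Depending on scheduling it may print either line; the "race hit" case is
the stale-data window the patch closes.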

To make the cache handling safe, the flushing must already happen in
set_pte_at(), before the PTE becomes visible. MIPS CPUs without coherent
I-caches may see a small increase in flushes because the execute
permission (VM_EXEC) is not available in set_pte_at().
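
The cost of the lost execute information shows up directly in the
predicate change in cache.c below. The following is a condensed
side-by-side of the two conditions, not a literal copy of the sources:
the old __update_cache() could consult vma->vm_flags, while set_pte_at()
has no vma and therefore no VM_EXEC bit to test.

/* Old path, __update_cache(): the vma is at hand, so non-executable,
 * non-aliasing mappings can skip the flush entirely. */
int exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc;
if (exec || pages_do_alias(addr, address & PAGE_MASK))
	flush_data_cache_page(addr);

/* New path, mips_flush_dcache_from_pte() called from set_pte_at(): no vma,
 * so any CPU that cannot fill its I-cache from the D-cache has to flush,
 * even for mappings that will never be executed. */
if (!cpu_has_ic_fills_f_dc ||
    pages_do_alias(page_addr, address & PAGE_MASK))
	flush_data_cache_page(page_addr);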

[ralf@linux-mips.org: outlining set_pte_at() saves a good kilobyte in a
test build, so I moved its definition from pgtable.h to cache.c.]

Signed-off-by: Lars Persson <larper@axis.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/7511/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Author:    Lars Persson
Date:      2014-08-08 15:47:48 +02:00
Committer: Ralf Baechle
Parent:    6461568265
Commit:    2a4a8b1e5d

2 changed files with 22 additions and 13 deletions

diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h

@@ -122,6 +122,9 @@ do { \
 	}							\
 } while(0)
 
+extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
+	pte_t pteval);
+
 #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
 
 #define pte_none(pte)		(!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
@@ -145,7 +148,6 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
 		}
 	}
 }
-#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
 
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
@@ -183,7 +185,6 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
 	}
 #endif
 }
-#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
 
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
@@ -390,15 +391,12 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 
 extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
 	pte_t pte);
-extern void __update_cache(struct vm_area_struct *vma, unsigned long address,
-	pte_t pte);
 
 static inline void update_mmu_cache(struct vm_area_struct *vma,
 	unsigned long address, pte_t *ptep)
 {
 	pte_t pte = *ptep;
 	__update_tlb(vma, address, pte);
-	__update_cache(vma, address, pte);
 }
 
 static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,

diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c

@@ -119,25 +119,36 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr)
 
 EXPORT_SYMBOL(__flush_anon_page);
 
-void __update_cache(struct vm_area_struct *vma, unsigned long address,
-	pte_t pte)
+static void mips_flush_dcache_from_pte(pte_t pteval, unsigned long address)
 {
 	struct page *page;
-	unsigned long pfn, addr;
-	int exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc;
+	unsigned long pfn = pte_pfn(pteval);
 
-	pfn = pte_pfn(pte);
 	if (unlikely(!pfn_valid(pfn)))
 		return;
+
 	page = pfn_to_page(pfn);
 	if (page_mapping(page) && Page_dcache_dirty(page)) {
-		addr = (unsigned long) page_address(page);
-		if (exec || pages_do_alias(addr, address & PAGE_MASK))
-			flush_data_cache_page(addr);
+		unsigned long page_addr = (unsigned long) page_address(page);
+
+		if (!cpu_has_ic_fills_f_dc ||
+		    pages_do_alias(page_addr, address & PAGE_MASK))
+			flush_data_cache_page(page_addr);
 		ClearPageDcacheDirty(page);
 	}
 }
 
+void set_pte_at(struct mm_struct *mm, unsigned long addr,
+	pte_t *ptep, pte_t pteval)
+{
+	if (cpu_has_dc_aliases || !cpu_has_ic_fills_f_dc) {
+		if (pte_present(pteval))
+			mips_flush_dcache_from_pte(pteval, addr);
+	}
+
+	set_pte(ptep, pteval);
+}
+
 unsigned long _page_cachable_default;
 EXPORT_SYMBOL(_page_cachable_default);