powerpc: Get USE_STRICT_MM_TYPECHECKS working again
The typesafe version of the powerpc pagetable handling (with USE_STRICT_MM_TYPECHECKS defined) has bitrotted again.  This patch makes a bunch of small fixes to get it back to building status.

It's still not enabled by default, as gcc still generates worse code with it for some reason.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
commit f5ea64dcba
parent cd301c7ba4
committed by Benjamin Herrenschmidt
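Every hunk below makes the same kind of fix: code that handled page-table values as plain integers is rewritten to go through the pte_val()/pgd_val()/pgprot_val() accessors, which work whether or not the strict typechecks are enabled. As a rough, minimal sketch of the wrapper pattern involved (simplified for illustration, not the real powerpc asm/page.h definitions):

    #include <stdio.h>

    #ifdef USE_STRICT_MM_TYPECHECKS
    /* Strict variant: entries are one-member structs, so the compiler
     * rejects code that mixes them with plain integers. */
    typedef struct { unsigned long pte; } pte_t;
    #define pte_val(x)  ((x).pte)
    #define __pte(x)    ((pte_t) { (x) })
    #else
    /* Relaxed variant: entries are plain integers, accessors are no-ops. */
    typedef unsigned long pte_t;
    #define pte_val(x)  (x)
    #define __pte(x)    (x)
    #endif

    int main(void)
    {
            pte_t a = __pte(0x1234), b = __pte(0x1234);

            /* "a != b" only compiles in the relaxed case; comparing the
             * raw values works with either definition, which is why the
             * gup hunks below switch to pte_val(). */
            printf("same: %d\n", pte_val(a) == pte_val(b));
            return 0;
    }

Compiling the sketch with and without -DUSE_STRICT_MM_TYPECHECKS shows the difference the rest of the patch has to accommodate.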
@@ -41,7 +41,7 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
                 page = pte_page(pte);
                 if (!page_cache_get_speculative(page))
                         return 0;
-                if (unlikely(pte != *ptep)) {
+                if (unlikely(pte_val(pte) != pte_val(*ptep))) {
                         put_page(page);
                         return 0;
                 }
@@ -92,7 +92,7 @@ static noinline int gup_huge_pte(pte_t *ptep, struct hstate *hstate,
                 *nr -= refs;
                 return 0;
         }
-        if (unlikely(pte != *ptep)) {
+        if (unlikely(pte_val(pte) != pte_val(*ptep))) {
                 /* Could be optimized better */
                 while (*nr) {
                         put_page(page);
@@ -237,7 +237,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
                 pgd_t pgd = *pgdp;
 
                 VM_BUG_ON(shift != mmu_psize_defs[get_slice_psize(mm, addr)].shift);
-                pr_debug(" %016lx: normal pgd %p\n", addr, (void *)pgd);
+                pr_debug(" %016lx: normal pgd %p\n", addr,
+                         (void *)pgd_val(pgd));
                 next = pgd_addr_end(addr, end);
                 if (pgd_none(pgd))
                         goto slow;
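The pr_debug() change above is the same problem in cast form: a struct-wrapped pgd_t cannot be converted to void *, so the raw value is unwrapped before printing. A hypothetical stand-alone illustration, reusing the simplified wrapper style from the sketch above (not the real definitions):

    #include <stdio.h>

    typedef struct { unsigned long pgd; } pgd_t;
    #define pgd_val(x)  ((x).pgd)

    void show_pgd(unsigned long addr, pgd_t pgd)
    {
            /* "(void *)pgd" would be rejected; unwrapping compiles. */
            printf(" %016lx: normal pgd %p\n", addr, (void *)pgd_val(pgd));
    }

    int main(void)
    {
            pgd_t pgd = { 0x1234UL };

            show_pgd(0xc000000000000000UL, pgd);
            return 0;
    }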
@@ -541,7 +541,7 @@ static unsigned long __init htab_get_table_size(void)
 void create_section_mapping(unsigned long start, unsigned long end)
 {
         BUG_ON(htab_bolt_mapping(start, end, __pa(start),
-                                 PAGE_KERNEL, mmu_linear_psize,
+                                 pgprot_val(PAGE_KERNEL), mmu_linear_psize,
                                  mmu_kernel_ssize));
 }
 
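The create_section_mapping() hunk above and the two hunks that follow make the matching fix for protection flags: under strict typechecks PAGE_KERNEL is a wrapped pgprot_t, so wherever the raw protection bits are wanted as a plain unsigned long they are unwrapped with pgprot_val(). A hedged sketch of that calling pattern (names and the bit value below are made up for illustration, not the powerpc definitions):

    #include <stdio.h>

    typedef struct { unsigned long pgprot; } pgprot_t;
    #define pgprot_val(x)   ((x).pgprot)
    #define __pgprot(x)     ((pgprot_t) { (x) })
    #define PAGE_KERNEL     __pgprot(0x183UL)   /* made-up bit pattern */

    /* Stand-in for a helper that takes the raw bits as unsigned long. */
    int bolt_mapping(unsigned long vstart, unsigned long vend,
                     unsigned long prot)
    {
            printf("map %lx-%lx prot %lx\n", vstart, vend, prot);
            return 0;
    }

    int main(void)
    {
            /* Passing PAGE_KERNEL directly would be a type error here;
             * the raw value is extracted with pgprot_val() first. */
            return bolt_mapping(0xc000000000000000UL,
                                0xc000000000100000UL,
                                pgprot_val(PAGE_KERNEL));
    }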
@@ -649,7 +649,7 @@ void __init htab_initialize(void)
                 mtspr(SPRN_SDR1, _SDR1);
         }
 
-        prot = PAGE_KERNEL;
+        prot = pgprot_val(PAGE_KERNEL);
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
         linear_map_hash_count = lmb_end_of_DRAM() >> PAGE_SHIFT;
@@ -228,8 +228,8 @@ int __meminit vmemmap_populate(struct page *start_page,
                         start, p, __pa(p));
 
                 mapped = htab_bolt_mapping(start, start + page_size, __pa(p),
-                                           PAGE_KERNEL, mmu_vmemmap_psize,
-                                           mmu_kernel_ssize);
+                                           pgprot_val(PAGE_KERNEL),
+                                           mmu_vmemmap_psize, mmu_kernel_ssize);
                 BUG_ON(mapped < 0);
         }
 