sh: TLB fast path optimizations for load/store exceptions.
This only bothers with the TLB entry flush in the case of the initial page write exception, as it is unnecessary in the case of the load/store exceptions.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
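In outline: the exception stubs pass the fault type to handle_tlbmiss() in r5, which lands in its writeaccess argument (r5 carries the second C argument on SH). initial_page_write now passes 2 instead of 1, so the C side can tell an initial page write apart from an ordinary store miss and perform the SH-4 TLB entry flush only in that case; plain load/store misses run through the fast path with no flush at all. A standalone sketch of the resulting dispatch follows the diff below.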
@@ -124,7 +124,7 @@ ENTRY(tlb_miss_store)
 	.align	2
 ENTRY(initial_page_write)
 	bra	call_handle_tlbmiss
-	 mov	#1, r5
+	 mov	#2, r5
 
 	.align	2
 ENTRY(tlb_protection_violation_load)
@@ -327,7 +327,6 @@ handle_tlbmiss(struct pt_regs *regs, unsigned long writeaccess,
 	pmd_t *pmd;
 	pte_t *pte;
 	pte_t entry;
-	int ret = 1;
 
 	/*
 	 * We don't take page faults for P1, P2, and parts of P4, these
@@ -338,40 +337,41 @@ handle_tlbmiss(struct pt_regs *regs, unsigned long writeaccess,
 		pgd = pgd_offset_k(address);
 	} else {
 		if (unlikely(address >= TASK_SIZE || !current->mm))
-			goto out;
+			return 1;
 
 		pgd = pgd_offset(current->mm, address);
 	}
 
 	pud = pud_offset(pgd, address);
 	if (pud_none_or_clear_bad(pud))
-		goto out;
+		return 1;
 	pmd = pmd_offset(pud, address);
 	if (pmd_none_or_clear_bad(pmd))
-		goto out;
+		return 1;
 	pte = pte_offset_kernel(pmd, address);
 	entry = *pte;
 	if (unlikely(pte_none(entry) || pte_not_present(entry)))
-		goto out;
+		return 1;
 	if (unlikely(writeaccess && !pte_write(entry)))
-		goto out;
+		return 1;
 
 	if (writeaccess)
 		entry = pte_mkdirty(entry);
 	entry = pte_mkyoung(entry);
 
+	set_pte(pte, entry);
+
 #if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SMP)
 	/*
-	 * ITLB is not affected by "ldtlb" instruction.
-	 * So, we need to flush the entry by ourselves.
+	 * SH-4 does not set MMUCR.RC to the corresponding TLB entry in
+	 * the case of an initial page write exception, so we need to
+	 * flush it in order to avoid potential TLB entry duplication.
 	 */
-	local_flush_tlb_one(get_asid(), address & PAGE_MASK);
+	if (writeaccess == 2)
+		local_flush_tlb_one(get_asid(), address & PAGE_MASK);
 #endif
 
-	set_pte(pte, entry);
 	update_mmu_cache(NULL, address, entry);
 
-	ret = 0;
-out:
-	return ret;
+	return 0;
 }
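A minimal userspace model of the reworked handler, for illustration only: struct fake_pte and model_handle_tlbmiss() are invented stand-ins, and the real code walks pgd/pud/pmd and calls local_flush_tlb_one() rather than printing. It shows the two things the patch changes: lookup failures now return 1 directly instead of branching to a shared out: label, and the TLB entry flush is gated on writeaccess == 2 (the initial page write case), assuming 0/1 remain the load/store encodings.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for a PTE; the real handler walks pgd/pud/pmd first. */
struct fake_pte {
	bool present;
	bool writable;
};

static int model_handle_tlbmiss(struct fake_pte *pte, int writeaccess)
{
	if (!pte || !pte->present)
		return 1;	/* early return replaces "goto out" */
	if (writeaccess && !pte->writable)
		return 1;	/* leave it to the full fault path */

	/* Only the initial page write pays for the entry flush. */
	if (writeaccess == 2)
		printf("  flush one TLB entry\n");

	return 0;
}

int main(void)
{
	struct fake_pte pte = { .present = true, .writable = true };

	printf("load miss     -> %d\n", model_handle_tlbmiss(&pte, 0));
	printf("store miss    -> %d\n", model_handle_tlbmiss(&pte, 1));
	printf("initial write -> %d\n", model_handle_tlbmiss(&pte, 2));
	return 0;
}

Hoisting set_pte() ahead of the flush block means the updated (dirty/young) PTE is already in place before the stale entry is dropped, and replacing the goto/ret idiom with direct returns keeps the common miss path short.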