sh: Preparation for uncached jumps through PMB.
Presently most of the 29-bit physical parts do P1/P2 segmentation with a 1:1 cached/uncached mapping, jumping between the two to control the caching behaviour. This provides the basic infrastructure to maintain this behaviour on 32-bit physical parts that don't map P1/P2 at all, using a shiny new linker section and corresponding fixmap entry.

Signed-off-by: Stuart Menefy <stuart.menefy@st.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
commit cbaa118ecf
parent 325df7f204
committed by Paul Mundt
arch/sh/mm/pmb.c
@@ -163,18 +163,18 @@ repeat:
         return 0;
 }
 
-int set_pmb_entry(struct pmb_entry *pmbe)
+int __uses_jump_to_uncached set_pmb_entry(struct pmb_entry *pmbe)
 {
         int ret;
 
-        jump_to_P2();
+        jump_to_uncached();
         ret = __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &pmbe->entry);
-        back_to_P1();
+        back_to_cached();
 
         return ret;
 }
 
-void clear_pmb_entry(struct pmb_entry *pmbe)
+void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe)
 {
         unsigned int entry = pmbe->entry;
         unsigned long addr;
@@ -188,7 +188,7 @@ void clear_pmb_entry(struct pmb_entry *pmbe)
             entry >= NR_PMB_ENTRIES))
                 return;
 
-        jump_to_P2();
+        jump_to_uncached();
 
         /* Clear V-bit */
         addr = mk_pmb_addr(entry);
@@ -197,7 +197,7 @@ void clear_pmb_entry(struct pmb_entry *pmbe)
         addr = mk_pmb_data(entry);
         ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr);
 
-        back_to_P1();
+        back_to_cached();
 
         clear_bit(entry, &pmb_map);
 }
@@ -302,7 +302,7 @@ static void pmb_cache_ctor(struct kmem_cache *cachep, void *pmb)
         pmbe->entry = PMB_NO_ENTRY;
 }
 
-static int __init pmb_init(void)
+static int __uses_jump_to_uncached pmb_init(void)
 {
         unsigned int nr_entries = ARRAY_SIZE(pmb_init_map);
         unsigned int entry, i;
@@ -312,7 +312,7 @@ static int __init pmb_init(void)
         pmb_cache = kmem_cache_create("pmb", sizeof(struct pmb_entry), 0,
                                       SLAB_PANIC, pmb_cache_ctor);
 
-        jump_to_P2();
+        jump_to_uncached();
 
         /*
          * Ordering is important, P2 must be mapped in the PMB before we
@@ -335,7 +335,7 @@ static int __init pmb_init(void)
         i |= MMUCR_TI;
         ctrl_outl(i, MMUCR);
 
-        back_to_P1();
+        back_to_cached();
 
         return 0;
 }
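For context, here is a rough sketch of what the helpers used above could expand to. This is illustrative only and is not code from this commit: the ".uncached.text" section name, the cached_to_uncached offset variable, and the exact SH asm sequences are assumptions standing in for the linker-section and fixmap plumbing the commit message describes.

/*
 * Illustrative sketch only -- not taken from this commit.
 *
 * Functions tagged __uses_jump_to_uncached are gathered into a
 * dedicated linker section so that an uncached alias of their text
 * can be provided (via a fixmap entry on 32-bit physical parts, or
 * simply the P2 segment on legacy 29-bit parts).
 */
#define __uses_jump_to_uncached \
        __attribute__((__section__(".uncached.text")))

/*
 * jump_to_uncached(): continue executing at the uncached alias of the
 * current code.  "cached_to_uncached" is an assumed name for the
 * offset between the cached and uncached mappings; on 29-bit parts it
 * would simply be the fixed P1->P2 distance.
 */
#define jump_to_uncached()                                      \
do {                                                            \
        unsigned long __dummy;                                  \
                                                                \
        __asm__ __volatile__(                                   \
                "mova   1f, %0\n\t"  /* r0 = cached address of 1: */   \
                "add    %1, %0\n\t"  /* add cached->uncached offset */ \
                "jmp    @%0\n\t"     /* continue at uncached alias */  \
                " nop\n\t"                                      \
                ".balign 4\n"                                   \
                "1:"                                            \
                : "=&z" (__dummy)                               \
                : "r" (cached_to_uncached));                    \
} while (0)

/*
 * back_to_cached(): jump back through the normal cached mapping.
 * Loading a label address from the literal pool yields the link-time
 * (cached) address, so no offset arithmetic is needed.
 */
#define back_to_cached()                                        \
do {                                                            \
        unsigned long __dummy;                                  \
                                                                \
        __asm__ __volatile__(                                   \
                "mov.l  1f, %0\n\t"                             \
                "jmp    @%0\n\t"                                \
                " nop\n\t"                                      \
                ".balign 4\n"                                   \
                "1:     .long 2f\n"                             \
                "2:"                                            \
                : "=&r" (__dummy));                             \
} while (0)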