sh: Optimized cache handling for SH-4/SH-4A caches.
This reworks some of the SH-4 cache handling code to more easily accommodate newer-style caches (particularly for the greater-than-direct-mapped case), as well as optimizing some of the old code. Signed-off-by: Richard Curnow <richard.curnow@st.com> Signed-off-by: Paul Mundt <lethal@linux-sh.org>
This commit is contained in:
committed by
Paul Mundt
parent
fdfc74f9fc
commit
b638d0b921
@@ -23,15 +23,29 @@
|
||||
/* Round x up to the next multiple of the L1 cache line size. */
#define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
|
||||
|
||||
/*
 * Describes the geometry and addressing parameters of one CPU cache
 * (SH-4/SH-4A style).  The garbled diff rendering previously duplicated
 * the ways/sets/linesz members; this is the reconstructed post-change
 * definition.
 */
struct cache_info {
	unsigned int ways;		/* Number of cache ways */
	unsigned int sets;		/* Number of cache sets */
	unsigned int linesz;		/* Cache line size (bytes) */

	unsigned int way_size;		/* sets * line size */

	/*
	 * way_incr is the address offset for accessing the next way
	 * in memory mapped cache array ops.
	 */
	unsigned int way_incr;

	unsigned int entry_shift;
	unsigned int entry_mask;

	/*
	 * Compute a mask which selects the address bits which overlap between
	 * 1. those used to select the cache set during indexing
	 * 2. those in the physical page number.
	 */
	unsigned int alias_mask;

	unsigned int n_aliases;		/* Number of aliases */

	unsigned long flags;
};
|
||||
|
||||
|
Reference in New Issue
Block a user