Blackfin arch: Start untangling the CPLB handling code.

- Move cache initialization from assembly to C.
- Move the anomaly workaround for writing [ID]MEM_CONTROL to assembly, so that we don't have to mess around with .align directives in C source.
- Fix a bug where bfin_write_DMEM_CONTROL would write to IMEM_CONTROL.
- Break out the CPLB-related code from kernel/setup.c into its own file.
- Don't define variables in header files, only declare them.

Signed-off-by: Bernd Schmidt <bernd.schmidt@analog.com>
Signed-off-by: Bryan Wu <bryan.wu@analog.com>
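
The hunks below cover only kernel/setup.c; the header-side changes named in the log are not shown here. As a rough, hypothetical sketch (names follow the usual Blackfin conventions visible in the code below, not the actual header hunks), the accessor bug fix and the declare-don't-define rule have roughly this shape:

/* Hypothetical sketch -- not the actual header hunk from this commit. */

/* The bug in the log: the DMEM write accessor was pointed at the
 * instruction-memory control register.  The fix looks like: */
#define bfin_write_DMEM_CONTROL(val)	bfin_write32(DMEM_CONTROL, val)	/* was IMEM_CONTROL */

/* "Don't define variables in header files, only declare them":
 * the header carries only an extern declaration; exactly one .c file
 * (here, the new CPLB file) provides the definition. */
extern u_long icplb_table[];		/* header: declaration only */
u_long icplb_table[MAX_CPLBS + 1];	/* one .c file: the definition (size illustrative) */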
@@ -64,10 +64,6 @@ EXPORT_SYMBOL(mtd_size);

char __initdata command_line[COMMAND_LINE_SIZE];

#if defined(CONFIG_BLKFIN_DCACHE) || defined(CONFIG_BLKFIN_CACHE)
static void generate_cpl_tables(void);
#endif

void __init bf53x_cache_init(void)
{
#if defined(CONFIG_BLKFIN_DCACHE) || defined(CONFIG_BLKFIN_CACHE)
@@ -401,8 +397,6 @@ void __init setup_arch(char **cmdline_p)
	_bfin_swrst = bfin_read_SWRST();
#endif

	bf53x_cache_init();

	printk(KERN_INFO "Hardware Trace Enabled\n");
	bfin_write_TBUFCTL(0x03);

@@ -426,6 +420,8 @@ void __init setup_arch(char **cmdline_p)
	       != ATOMIC_AND32 - FIXED_CODE_START);
	BUG_ON((char *)&atomic_xor32 - (char *)&fixed_code_start
	       != ATOMIC_XOR32 - FIXED_CODE_START);

	bf53x_cache_init();
}

static int __init topology_init(void)
@@ -443,287 +439,6 @@ static int __init topology_init(void)

subsys_initcall(topology_init);

#if defined(CONFIG_BLKFIN_DCACHE) || defined(CONFIG_BLKFIN_CACHE)
static u16 __init lock_kernel_check(u32 start, u32 end)
{
	if ((start <= (u32) _stext && end >= (u32) _end)
	    || (start >= (u32) _stext && end <= (u32) _end))
		return IN_KERNEL;
	return 0;
}

static unsigned short __init
fill_cplbtab(struct cplb_tab *table,
	     unsigned long start, unsigned long end,
	     unsigned long block_size, unsigned long cplb_data)
{
	int i;

	switch (block_size) {
	case SIZE_4M:
		i = 3;
		break;
	case SIZE_1M:
		i = 2;
		break;
	case SIZE_4K:
		i = 1;
		break;
	case SIZE_1K:
	default:
		i = 0;
		break;
	}

	cplb_data = (cplb_data & ~(3 << 16)) | (i << 16);

	while ((start < end) && (table->pos < table->size)) {

		table->tab[table->pos++] = start;

		if (lock_kernel_check(start, start + block_size) == IN_KERNEL)
			table->tab[table->pos++] =
			    cplb_data | CPLB_LOCK | CPLB_DIRTY;
		else
			table->tab[table->pos++] = cplb_data;

		start += block_size;
	}
	return 0;
}

static unsigned short __init
close_cplbtab(struct cplb_tab *table)
{

	while (table->pos < table->size) {

		table->tab[table->pos++] = 0;
		table->tab[table->pos++] = 0; /* !CPLB_VALID */
	}
	return 0;
}

/* helper function */
static void __fill_code_cplbtab(struct cplb_tab *t, int i,
				u32 a_start, u32 a_end)
{
	if (cplb_data[i].psize) {
		fill_cplbtab(t,
			     cplb_data[i].start,
			     cplb_data[i].end,
			     cplb_data[i].psize,
			     cplb_data[i].i_conf);
	} else {
#if (defined(CONFIG_BLKFIN_CACHE) && defined(ANOMALY_05000263))
		if (i == SDRAM_KERN) {
			fill_cplbtab(t,
				     cplb_data[i].start,
				     cplb_data[i].end,
				     SIZE_4M,
				     cplb_data[i].i_conf);
		} else
#endif
		{
			fill_cplbtab(t,
				     cplb_data[i].start,
				     a_start,
				     SIZE_1M,
				     cplb_data[i].i_conf);
			fill_cplbtab(t,
				     a_start,
				     a_end,
				     SIZE_4M,
				     cplb_data[i].i_conf);
			fill_cplbtab(t, a_end,
				     cplb_data[i].end,
				     SIZE_1M,
				     cplb_data[i].i_conf);
		}
	}
}

static void __fill_data_cplbtab(struct cplb_tab *t, int i,
				u32 a_start, u32 a_end)
{
	if (cplb_data[i].psize) {
		fill_cplbtab(t,
			     cplb_data[i].start,
			     cplb_data[i].end,
			     cplb_data[i].psize,
			     cplb_data[i].d_conf);
	} else {
		fill_cplbtab(t,
			     cplb_data[i].start,
			     a_start, SIZE_1M,
			     cplb_data[i].d_conf);
		fill_cplbtab(t, a_start,
			     a_end, SIZE_4M,
			     cplb_data[i].d_conf);
		fill_cplbtab(t, a_end,
			     cplb_data[i].end,
			     SIZE_1M,
			     cplb_data[i].d_conf);
	}
}

static void __init generate_cpl_tables(void)
{

	u16 i, j, process;
	u32 a_start, a_end, as, ae, as_1m;

	struct cplb_tab *t_i = NULL;
	struct cplb_tab *t_d = NULL;
	struct s_cplb cplb;

	cplb.init_i.size = MAX_CPLBS;
	cplb.init_d.size = MAX_CPLBS;
	cplb.switch_i.size = MAX_SWITCH_I_CPLBS;
	cplb.switch_d.size = MAX_SWITCH_D_CPLBS;

	cplb.init_i.pos = 0;
	cplb.init_d.pos = 0;
	cplb.switch_i.pos = 0;
	cplb.switch_d.pos = 0;

	cplb.init_i.tab = icplb_table;
	cplb.init_d.tab = dcplb_table;
	cplb.switch_i.tab = ipdt_table;
	cplb.switch_d.tab = dpdt_table;

	cplb_data[SDRAM_KERN].end = memory_end;

#ifdef CONFIG_MTD_UCLINUX
	cplb_data[SDRAM_RAM_MTD].start = memory_mtd_start;
	cplb_data[SDRAM_RAM_MTD].end = memory_mtd_start + mtd_size;
	cplb_data[SDRAM_RAM_MTD].valid = mtd_size > 0;
# if defined(CONFIG_ROMFS_FS)
	cplb_data[SDRAM_RAM_MTD].attr |= I_CPLB;

	/*
	 * The ROMFS_FS size is often not a multiple of 1MB.
	 * This can cause multiple CPLB sets covering the same memory area.
	 * This will then cause multiple CPLB hit exceptions.
	 * Workaround: We ensure a contiguous memory area by extending the
	 * kernel memory section over the mtd section.
	 * For ROMFS_FS, memory must be covered with ICPLBs anyway.
	 * So there is no difference between kernel and mtd memory setup.
	 */

	cplb_data[SDRAM_KERN].end = memory_mtd_start + mtd_size;
	cplb_data[SDRAM_RAM_MTD].valid = 0;

# endif
#else
	cplb_data[SDRAM_RAM_MTD].valid = 0;
#endif

	cplb_data[SDRAM_DMAZ].start = _ramend - DMA_UNCACHED_REGION;
	cplb_data[SDRAM_DMAZ].end = _ramend;

	cplb_data[RES_MEM].start = _ramend;
	cplb_data[RES_MEM].end = physical_mem_end;

	if (reserved_mem_dcache_on)
		cplb_data[RES_MEM].d_conf = SDRAM_DGENERIC;
	else
		cplb_data[RES_MEM].d_conf = SDRAM_DNON_CHBL;

	if (reserved_mem_icache_on)
		cplb_data[RES_MEM].i_conf = SDRAM_IGENERIC;
	else
		cplb_data[RES_MEM].i_conf = SDRAM_INON_CHBL;

	for (i = ZERO_P; i <= L2_MEM; i++) {
		if (!cplb_data[i].valid)
			continue;

		as_1m = cplb_data[i].start % SIZE_1M;

		/*
		 * We need to make sure all sections are properly 1M aligned.
		 * However, between Kernel Memory and the Kernel mtd section,
		 * depending on the rootfs size, there can be overlapping
		 * memory areas.
		 */

		if (as_1m && i != L1I_MEM && i != L1D_MEM) {
#ifdef CONFIG_MTD_UCLINUX
			if (i == SDRAM_RAM_MTD) {
				if ((cplb_data[SDRAM_KERN].end + 1) >
				    cplb_data[SDRAM_RAM_MTD].start)
					cplb_data[SDRAM_RAM_MTD].start =
					    (cplb_data[i].start &
					     (-2*SIZE_1M)) + SIZE_1M;
				else
					cplb_data[SDRAM_RAM_MTD].start =
					    (cplb_data[i].start &
					     (-2*SIZE_1M));
			} else
#endif
				printk(KERN_WARNING
				       "Unaligned Start of %s at 0x%X\n",
				       cplb_data[i].name, cplb_data[i].start);
		}

		as = cplb_data[i].start % SIZE_4M;
		ae = cplb_data[i].end % SIZE_4M;

		if (as)
			a_start = cplb_data[i].start + (SIZE_4M - (as));
		else
			a_start = cplb_data[i].start;

		a_end = cplb_data[i].end - ae;

		for (j = INITIAL_T; j <= SWITCH_T; j++) {

			switch (j) {
			case INITIAL_T:
				if (cplb_data[i].attr & INITIAL_T) {
					t_i = &cplb.init_i;
					t_d = &cplb.init_d;
					process = 1;
				} else
					process = 0;
				break;
			case SWITCH_T:
				if (cplb_data[i].attr & SWITCH_T) {
					t_i = &cplb.switch_i;
					t_d = &cplb.switch_d;
					process = 1;
				} else
					process = 0;
				break;
			default:
				process = 0;
				break;
			}

			if (!process)
				continue;
			if (cplb_data[i].attr & I_CPLB)
				__fill_code_cplbtab(t_i, i, a_start, a_end);

			if (cplb_data[i].attr & D_CPLB)
				__fill_data_cplbtab(t_d, i, a_start, a_end);
		}
	}

	/* close tables */

	close_cplbtab(&cplb.init_i);
	close_cplbtab(&cplb.init_d);

	cplb.init_i.tab[cplb.init_i.pos] = -1;
	cplb.init_d.tab[cplb.init_d.pos] = -1;
	cplb.switch_i.tab[cplb.switch_i.pos] = -1;
	cplb.switch_d.tab[cplb.switch_d.pos] = -1;

}

#endif

static u_long get_vco(void)
{
	u_long msel;