sh: Migrate SH-4 cacheflush ops to function pointers.
This paves the way for allowing individual CPUs to overload the individual flushing routines that they care about without having to depend on weak aliases. SH-4 is converted over initially, as it wires up pretty much everything. The majority of the other CPUs will simply use the default no-op implementation with their own region flushers wired up.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
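For readers unfamiliar with the pattern, here is a minimal, hedged sketch of the idea the message describes: instead of relying on weak symbol aliases that a CPU family may or may not override at link time, each cacheflush op becomes a function pointer initialized to a no-op default, and a CPU's cache init code reassigns only the pointers it cares about. All identifiers below are illustrative, not the actual arch/sh names.

/*
 * Illustrative sketch only -- not the arch/sh implementation.
 * A cacheflush op is a function pointer with a no-op default;
 * a CPU that needs real flushing installs its own routine at boot.
 */
#include <stdio.h>

struct page;				/* opaque here; enough for a pointer */

static void noop_flush_dcache_page(struct page *pg)
{
	(void)pg;			/* CPUs with nothing to do take the default */
}

/* The op itself: a pointer, defaulting to the no-op. */
static void (*flush_dcache_page_op)(struct page *pg) = noop_flush_dcache_page;

static void sh4_like_flush_dcache_page(struct page *pg)
{
	(void)pg;
	printf("write back and invalidate this page's dcache lines\n");
}

static void cpu_cache_init(void)
{
	/* Override only the routines this CPU actually implements. */
	flush_dcache_page_op = sh4_like_flush_dcache_page;
}

int main(void)
{
	cpu_cache_init();
	flush_dcache_page_op(NULL);	/* dispatches through the pointer */
	return 0;
}

With pointer-based dispatch there is a single strong definition per op, and per-CPU overrides happen at boot time rather than through link-time weak/strong symbol resolution.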
@@ -3,10 +3,6 @@

 #ifndef __ASSEMBLY__

 struct vm_area_struct;
 struct page;
 struct mm_struct;

-extern void flush_cache_all(void);
-extern void flush_cache_mm(struct mm_struct *mm);
-extern void flush_cache_sigtramp(unsigned long vaddr);

@@ -16,10 +12,14 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, uns

 extern void flush_dcache_page(struct page *pg);
 extern void flush_icache_range(unsigned long start, unsigned long end);

+/* XXX .. */
+extern void (*__flush_wback_region)(void *start, int size);
+extern void (*__flush_purge_region)(void *start, int size);
+extern void (*__flush_invalidate_region)(void *start, int size);

 #define flush_cache_dup_mm(mm) flush_cache_mm(mm)
 #define flush_icache_page(vma, page) do { } while (0)

 #endif /* __ASSEMBLY__ */

 #endif /* __ASM_SH_CPU_SH5_CACHEFLUSH_H */
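As a usage illustration only, here is a hedged sketch of how a CPU family's boot code might install its region flushers through the three pointers declared in the hunk above. The sh4_* routine names and the init function are assumptions made for the example, not names taken from this patch; the pointers are defined locally here so the sketch compiles on its own.

/* Hypothetical wiring of the region-flush pointers; sh4_* names are
 * illustrative.  On SH, "wback" writes dirty lines back, "purge" writes
 * back and invalidates, and "invalidate" drops lines without write-back. */

void (*__flush_wback_region)(void *start, int size);
void (*__flush_purge_region)(void *start, int size);
void (*__flush_invalidate_region)(void *start, int size);

static void sh4_flush_wback_region(void *start, int size)
{
	(void)start; (void)size;	/* write back [start, start + size) */
}

static void sh4_flush_purge_region(void *start, int size)
{
	(void)start; (void)size;	/* write back, then invalidate */
}

static void sh4_flush_invalidate_region(void *start, int size)
{
	(void)start; (void)size;	/* invalidate without write-back */
}

void sh4_region_flush_init(void)
{
	__flush_wback_region      = sh4_flush_wback_region;
	__flush_purge_region      = sh4_flush_purge_region;
	__flush_invalidate_region = sh4_flush_invalidate_region;
}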