sparc32: centralize all mmu context handling in srmmu.c
Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit b585e8551b (parent 59b00c792f)
committed by David S. Miller
@@ -9,14 +9,12 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 }
 
-/*
- * Initialize a new mmu context. This is invoked when a new
+/* Initialize a new mmu context. This is invoked when a new
  * address space instance (unique or shared) is instantiated.
  */
-#define init_new_context(tsk, mm) (((mm)->context = NO_CONTEXT), 0)
+int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 
-/*
- * Destroy a dead context. This occurs when mmput drops the
+/* Destroy a dead context. This occurs when mmput drops the
  * mm_users count to zero, the mmaps have been released, and
  * all the page tables have been flushed. Our job is to destroy
  * any remaining processor-specific state.
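A note on the hunk above: the old init_new_context() was a header macro built on the comma operator — the assignment to (mm)->context runs first, then the whole expression evaluates to 0 ("success"). Turning it into an out-of-line function (defined in srmmu.c, last hunk below) lets NO_CONTEXT become private to srmmu.c. A minimal user-space sketch of the equivalence; the _macro/_fn suffixes, the void *tsk parameter, and the stand-in mm_struct are illustrative, not kernel code:

```c
#include <assert.h>
#include <stddef.h>

struct mm_struct { long context; };	/* stand-in: only the field used here */

#define NO_CONTEXT -1

/* Old header form: the comma operator does the assignment, then the
 * whole expression evaluates to 0 ("success"). */
#define init_new_context_macro(tsk, mm) (((mm)->context = NO_CONTEXT), 0)

/* New out-of-line form, as srmmu.c now defines it (tsk type simplified). */
static int init_new_context_fn(void *tsk, struct mm_struct *mm)
{
	(void)tsk;
	mm->context = NO_CONTEXT;
	return 0;
}

int main(void)
{
	struct mm_struct a = { 5 }, b = { 5 };

	assert(init_new_context_macro(NULL, &a) == 0 && a.context == NO_CONTEXT);
	assert(init_new_context_fn(NULL, &b) == 0 && b.context == NO_CONTEXT);
	return 0;
}
```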
@@ -79,8 +79,6 @@ extern unsigned long ptr_in_current_pgd;
 #define __S110 PAGE_SHARED
 #define __S111 PAGE_SHARED
 
-extern int num_contexts;
-
 /* First physical page can be anywhere, the following is needed so that
  * va-->pa and vice versa conversions work properly without performance
  * hit for all __pa()/__va() operations.
@@ -399,36 +397,6 @@ static inline pte_t pgoff_to_pte(unsigned long pgoff)
  */
 #define PTE_FILE_MAX_BITS 24
 
-/*
- */
-struct ctx_list {
-	struct ctx_list *next;
-	struct ctx_list *prev;
-	unsigned int ctx_number;
-	struct mm_struct *ctx_mm;
-};
-
-extern struct ctx_list *ctx_list_pool;	/* Dynamically allocated */
-extern struct ctx_list ctx_free;	/* Head of free list */
-extern struct ctx_list ctx_used;	/* Head of used contexts list */
-
-#define NO_CONTEXT -1
-
-static inline void remove_from_ctx_list(struct ctx_list *entry)
-{
-	entry->next->prev = entry->prev;
-	entry->prev->next = entry->next;
-}
-
-static inline void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry)
-{
-	entry->next = head;
-	(entry->prev = head->prev)->next = entry;
-	head->prev = entry;
-}
-#define add_to_free_ctxlist(entry) add_to_ctx_list(&ctx_free, entry)
-#define add_to_used_ctxlist(entry) add_to_ctx_list(&ctx_used, entry)
-
 static inline unsigned long
 __get_phys (unsigned long addr)
 {
@@ -371,7 +371,6 @@ void __init setup_arch(char **cmdline_p)
 		(*(linux_dbvec->teach_debugger))();
 	}
 
-	init_mm.context = (unsigned long) NO_CONTEXT;
 	init_task.thread.kregs = &fake_swapper_regs;
 
 	/* Run-time patch instructions to match the cpu model */
@@ -32,12 +32,6 @@
 
 int show_unhandled_signals = 1;
 
-/* At boot time we determine these two values necessary for setting
- * up the segment maps and page table entries (pte's).
- */
-
-int num_contexts;
-
 /* Return how much physical memory we have. */
 unsigned long probe_memory(void)
 {
@@ -82,24 +82,6 @@ void show_mem(unsigned int filter)
 #endif
 }
 
-void __init sparc_context_init(int numctx)
-{
-	int ctx;
-
-	ctx_list_pool = __alloc_bootmem(numctx * sizeof(struct ctx_list), SMP_CACHE_BYTES, 0UL);
-
-	for(ctx = 0; ctx < numctx; ctx++) {
-		struct ctx_list *clist;
-
-		clist = (ctx_list_pool + ctx);
-		clist->ctx_number = ctx;
-		clist->ctx_mm = NULL;
-	}
-	ctx_free.next = ctx_free.prev = &ctx_free;
-	ctx_used.next = ctx_used.prev = &ctx_used;
-	for(ctx = 0; ctx < numctx; ctx++)
-		add_to_free_ctxlist(ctx_list_pool + ctx);
-}
-
 extern unsigned long cmdline_memory_size;
 unsigned long last_valid_pfn;
@@ -55,10 +55,6 @@ static unsigned int hwbug_bitmask;
 int vac_cache_size;
 int vac_line_size;
 
-struct ctx_list *ctx_list_pool;
-struct ctx_list ctx_free;
-struct ctx_list ctx_used;
-
 extern struct resource sparc_iomap;
 
 extern unsigned long last_valid_pfn;
@@ -355,8 +351,39 @@ void pte_free(struct mm_struct *mm, pgtable_t pte)
 	srmmu_free_nocache(__nocache_va(p), PTE_SIZE);
 }
 
-/*
- */
+/* context handling - a dynamically sized pool is used */
+#define NO_CONTEXT -1
+
+struct ctx_list {
+	struct ctx_list *next;
+	struct ctx_list *prev;
+	unsigned int ctx_number;
+	struct mm_struct *ctx_mm;
+};
+
+static struct ctx_list *ctx_list_pool;
+static struct ctx_list ctx_free;
+static struct ctx_list ctx_used;
+
+/* At boot time we determine the number of contexts */
+static int num_contexts;
+
+static inline void remove_from_ctx_list(struct ctx_list *entry)
+{
+	entry->next->prev = entry->prev;
+	entry->prev->next = entry->next;
+}
+
+static inline void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry)
+{
+	entry->next = head;
+	(entry->prev = head->prev)->next = entry;
+	head->prev = entry;
+}
+#define add_to_free_ctxlist(entry) add_to_ctx_list(&ctx_free, entry)
+#define add_to_used_ctxlist(entry) add_to_ctx_list(&ctx_used, entry)
+
+
 static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm)
 {
 	struct ctx_list *ctxp;
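The block moved into srmmu.c above is an intrusive, circular doubly-linked list with sentinel heads: a head pointing at itself is the empty ring, add_to_ctx_list() inserts just before the head (i.e. at the tail), and remove_from_ctx_list() unlinks an entry while keeping the ring closed. A standalone user-space sketch of the same operations — the pool size and main() driver are invented for the demo, and the init loop mirrors the sparc_context_init() added in the next hunk:

```c
#include <stdio.h>

struct ctx_list {
	struct ctx_list *next;
	struct ctx_list *prev;
	unsigned int ctx_number;
};

/* Unlink entry from whatever ring it is on; the ring stays closed. */
static void remove_from_ctx_list(struct ctx_list *entry)
{
	entry->next->prev = entry->prev;
	entry->prev->next = entry->next;
}

/* Link entry in just before head, i.e. at the tail of the ring. */
static void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry)
{
	entry->next = head;
	(entry->prev = head->prev)->next = entry;
	head->prev = entry;
}

int main(void)
{
	/* A head pointing at itself is an empty ring, exactly the
	 * ctx_free.next = ctx_free.prev = &ctx_free state below. */
	struct ctx_list ctx_free = { &ctx_free, &ctx_free, 0 };
	struct ctx_list ctx_used = { &ctx_used, &ctx_used, 0 };
	struct ctx_list pool[4];
	struct ctx_list *ctxp;
	unsigned int i;

	for (i = 0; i < 4; i++) {	/* mirrors sparc_context_init() */
		pool[i].ctx_number = i;
		add_to_ctx_list(&ctx_free, &pool[i]);
	}

	/* Allocate: take the first free entry, move it to the used ring. */
	ctxp = ctx_free.next;
	remove_from_ctx_list(ctxp);
	add_to_ctx_list(&ctx_used, ctxp);
	printf("allocated ctx %u\n", ctxp->ctx_number);	/* allocated ctx 0 */
	return 0;
}
```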
@@ -392,6 +419,26 @@ static inline void free_context(int context)
 	add_to_free_ctxlist(ctx_old);
 }
 
+static void __init sparc_context_init(int numctx)
+{
+	int ctx;
+	unsigned long size;
+
+	size = numctx * sizeof(struct ctx_list);
+	ctx_list_pool = __alloc_bootmem(size, SMP_CACHE_BYTES, 0UL);
+
+	for (ctx = 0; ctx < numctx; ctx++) {
+		struct ctx_list *clist;
+
+		clist = (ctx_list_pool + ctx);
+		clist->ctx_number = ctx;
+		clist->ctx_mm = NULL;
+	}
+	ctx_free.next = ctx_free.prev = &ctx_free;
+	ctx_used.next = ctx_used.prev = &ctx_used;
+	for (ctx = 0; ctx < numctx; ctx++)
+		add_to_free_ctxlist(ctx_list_pool + ctx);
+}
+
 void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
 	       struct task_struct *tsk)
@@ -799,9 +846,6 @@ static void __init map_kernel(void)
 	}
 }
 
-/* Paging initialization on the Sparc Reference MMU. */
-extern void sparc_context_init(int);
-
 void (*poke_srmmu)(void) __cpuinitdata = NULL;
 
 extern unsigned long bootmem_init(unsigned long *pages_avail);
@@ -816,6 +860,7 @@ void __init srmmu_paging_init(void)
 	pte_t *pte;
 	unsigned long pages_avail;
 
+	init_mm.context = (unsigned long) NO_CONTEXT;
 	sparc_iomap.start = SUN4M_IOBASE_VADDR;	/* 16MB of IOSPACE on all sun4m's. */
 
 	if (sparc_cpu_model == sun4d)
@@ -918,6 +963,12 @@ void mmu_info(struct seq_file *m)
 		   srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
 }
 
+int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+	mm->context = NO_CONTEXT;
+	return 0;
+}
+
 void destroy_context(struct mm_struct *mm)
 {
 
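Taken together, the entry points now centralized in srmmu.c cover the whole lifetime of a context: init_new_context() marks a fresh mm with NO_CONTEXT, the first switch_mm() to that mm hands it a real context number from the pool, and destroy_context() (its body is truncated in the hunk above) gives the number back. A simplified model of that flow — it uses a plain index stack in place of the kernel's free/used rings, drops the tsk parameter, and omits the steal-from-ctx_used path taken when no free context remains; all names shadow the kernel ones for illustration only:

```c
#include <stdio.h>

#define NO_CONTEXT	-1
#define NUM_CONTEXTS	4	/* the kernel probes num_contexts at boot */

struct mm_struct { long context; };

/* Simplified free pool: a stack of context numbers stands in for the
 * ctx_free/ctx_used rings of the real code. */
static int free_ctx[NUM_CONTEXTS];
static int free_top;

static void sparc_context_init(int numctx)	/* boot-time pool setup */
{
	int i;

	for (i = 0; i < numctx; i++)
		free_ctx[free_top++] = i;
}

static int init_new_context(struct mm_struct *mm)	/* fresh mm: none yet */
{
	mm->context = NO_CONTEXT;
	return 0;
}

static void switch_mm(struct mm_struct *mm)	/* first switch allocates */
{
	if (mm->context == NO_CONTEXT && free_top > 0)
		mm->context = free_ctx[--free_top];
	/* the real switch_mm() instead steals one from ctx_used if empty */
}

static void destroy_context(struct mm_struct *mm)	/* dead mm: give it back */
{
	if (mm->context != NO_CONTEXT) {
		free_ctx[free_top++] = (int)mm->context;
		mm->context = NO_CONTEXT;
	}
}

int main(void)
{
	struct mm_struct mm;

	sparc_context_init(NUM_CONTEXTS);
	init_new_context(&mm);
	switch_mm(&mm);
	printf("got context %ld\n", mm.context);	/* got context 3 */
	destroy_context(&mm);
	return 0;
}
```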