gru: check context state on reload
Check whether the gru state being loaded into a gru is from a new context or a previously unloaded context. If new, simply zero out the hardware context; if unloaded and valid, reload the old state. This change is primarily for reloading kernel contexts, where the previous state is not required to be saved.

Signed-off-by: Jack Steiner <steiner@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 940229b9c0
parent 17b49a67a6
Committed by: Linus Torvalds
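Condensed, the state handshake this patch introduces looks like the sketch below (a simplification, not the full functions; names and calls are taken from the diff that follows, surrounding code omitted): unloading a context with savestate marks ts_gdata as holding a valid saved image, and gru_load_context_data() consults that flag to decide between restoring the saved image and zeroing the hardware context.

    /* Sketch of the ts_data_valid handshake (simplified from the diff below). */

    /* Unload path: the save area is only valid if state was actually saved. */
    if (savestate) {
            gru_unload_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr,
                                    ctxnum, gts->ts_cbr_map, gts->ts_dsr_map);
            gts->ts_data_valid = 1;
    }

    /* Load path: pass the flag down so the loader knows what ts_gdata holds. */
    gru_load_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr, ctxnum,
                          gts->ts_cbr_map, gts->ts_dsr_map, gts->ts_data_valid);

    /* Inside gru_load_context_data(): restore saved state or start from zero. */
    if (data_valid)
            memcpy(gseg + GRU_DS_BASE, save, length);   /* reload old state   */
    else
            memset(gseg + GRU_DS_BASE, 0, length);      /* new context: zero  */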
@@ -307,11 +307,12 @@ struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
 	bytes = DSR_BYTES(dsr_au_count) + CBR_BYTES(cbr_au_count);
 	bytes += sizeof(struct gru_thread_state);
-	gts = kzalloc(bytes, GFP_KERNEL);
+	gts = kmalloc(bytes, GFP_KERNEL);
 	if (!gts)
 		return NULL;
 
 	STAT(gts_alloc);
+	memset(gts, 0, sizeof(struct gru_thread_state)); /* zero out header */
 	atomic_set(&gts->ts_refcnt, 1);
 	mutex_init(&gts->ts_ctxlock);
 	gts->ts_cbr_au_count = cbr_au_count;
@@ -458,7 +459,8 @@ static void gru_prefetch_context(void *gseg, void *cb, void *cbe,
 }
 
 static void gru_load_context_data(void *save, void *grubase, int ctxnum,
-			  unsigned long cbrmap, unsigned long dsrmap)
+			  unsigned long cbrmap, unsigned long dsrmap,
+			  int data_valid)
 {
 	void *gseg, *cb, *cbe;
 	unsigned long length;
@@ -471,12 +473,22 @@ static void gru_load_context_data(void *save, void *grubase, int ctxnum,
 	gru_prefetch_context(gseg, cb, cbe, cbrmap, length);
 
 	for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
-		save += gru_copy_handle(cb, save);
-		save += gru_copy_handle(cbe + i * GRU_HANDLE_STRIDE, save);
+		if (data_valid) {
+			save += gru_copy_handle(cb, save);
+			save += gru_copy_handle(cbe + i * GRU_HANDLE_STRIDE,
+								save);
+		} else {
+			memset(cb, 0, GRU_CACHE_LINE_BYTES);
+			memset(cbe + i * GRU_HANDLE_STRIDE, 0,
+						GRU_CACHE_LINE_BYTES);
+		}
 		cb += GRU_HANDLE_STRIDE;
 	}
 
-	memcpy(gseg + GRU_DS_BASE, save, length);
+	if (data_valid)
+		memcpy(gseg + GRU_DS_BASE, save, length);
+	else
+		memset(gseg + GRU_DS_BASE, 0, length);
 }
 
 static void gru_unload_context_data(void *save, void *grubase, int ctxnum,
@@ -517,10 +529,12 @@ void gru_unload_context(struct gru_thread_state *gts, int savestate)
 
 	if (!is_kernel_context(gts))
 		gru_unload_mm_tracker(gru, gts);
-	if (savestate)
+	if (savestate) {
 		gru_unload_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr,
 					ctxnum, gts->ts_cbr_map,
 					gts->ts_dsr_map);
+		gts->ts_data_valid = 1;
+	}
 
 	if (cch_deallocate(cch))
 		BUG();
@@ -576,7 +590,7 @@ void gru_load_context(struct gru_thread_state *gts)
 	}
 
 	gru_load_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr, ctxnum,
-			gts->ts_cbr_map, gts->ts_dsr_map);
+			gts->ts_cbr_map, gts->ts_dsr_map, gts->ts_data_valid);
 
 	if (cch_start(cch))
 		BUG();
@@ -385,6 +385,8 @@ struct gru_thread_state {
 							  after migration */
 	char		ts_cbr_idx[GRU_CBR_AU];	/* CBR numbers of each
 						   allocated CB */
+	int		ts_data_valid;		/* Indicates if ts_gdata has
+						   valid data */
 	unsigned long	ts_gdata[0];		/* save area for GRU data (CB,
 						   DS, CBE) */
 };