Pull kvm-patches into release branch
@@ -97,6 +97,7 @@
#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/tlb.h>

#include "mca_drv.h"
#include "entry.h"

@@ -112,6 +113,7 @@ DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */
DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */
DEFINE_PER_CPU(u64, ia64_mca_pal_pte); /* PTE to map PAL code */
DEFINE_PER_CPU(u64, ia64_mca_pal_base); /* vaddr PAL code granule */
DEFINE_PER_CPU(u64, ia64_mca_tr_reload); /* Flag for TR reload */

unsigned long __per_cpu_mca[NR_CPUS];

@@ -1182,6 +1184,49 @@ all_in:
	return;
}

/* mca_insert_tr
 *
 * Switch the rid if needed when reloading a TR.
 * iord: 1: itr, 2: dtr
 *
 */
static void mca_insert_tr(u64 iord)
{
	int i;
	u64 old_rr;
	struct ia64_tr_entry *p;
	unsigned long psr;
	int cpu = smp_processor_id();

	psr = ia64_clear_ic();
	for (i = IA64_TR_ALLOC_BASE; i < IA64_TR_ALLOC_MAX; i++) {
		p = &__per_cpu_idtrs[cpu][iord-1][i];
		if (p->pte & 0x1) {
			old_rr = ia64_get_rr(p->ifa);
			if (old_rr != p->rr) {
				ia64_set_rr(p->ifa, p->rr);
				ia64_srlz_d();
			}
			ia64_ptr(iord, p->ifa, p->itir >> 2);
			ia64_srlz_i();
			if (iord & 0x1) {
				ia64_itr(0x1, i, p->ifa, p->pte, p->itir >> 2);
				ia64_srlz_i();
			}
			if (iord & 0x2) {
				ia64_itr(0x2, i, p->ifa, p->pte, p->itir >> 2);
				ia64_srlz_i();
			}
			if (old_rr != p->rr) {
				ia64_set_rr(p->ifa, old_rr);
				ia64_srlz_d();
			}
		}
	}
	ia64_set_psr(psr);
}

/*
 * ia64_mca_handler
 *

@@ -1271,6 +1316,10 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
		monarch_cpu = -1;
#endif
	}
	if (__get_cpu_var(ia64_mca_tr_reload)) {
		mca_insert_tr(0x1); /* reload dynamic itrs */
		mca_insert_tr(0x2); /* reload dynamic dtrs */
	}
	if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover)
			== NOTIFY_STOP)
		ia64_mca_spin(__func__);

@@ -219,8 +219,13 @@ ia64_reload_tr:
	mov r20=IA64_TR_CURRENT_STACK
	;;
	itr.d dtr[r20]=r16
	GET_THIS_PADDR(r2, ia64_mca_tr_reload)
	mov r18 = 1
	;;
	srlz.d
	;;
	st8 [r2] =r18
	;;

done_tlb_purge_and_reload:

@@ -209,6 +209,19 @@ send_IPI_allbutself (int op)
	}
}

/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_mask(cpumask_t mask, int op)
{
	unsigned int cpu;

	for_each_cpu_mask(cpu, mask) {
		send_IPI_single(cpu, op);
	}
}

/*
 * Called with preemption disabled.
 */

@@ -401,6 +414,75 @@ smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int
}
EXPORT_SYMBOL(smp_call_function_single);

/**
 * smp_call_function_mask(): Run a function on a set of other CPUs.
 * <mask>	The set of cpus to run on.  Must not include the current cpu.
 * <func>	The function to run. This must be fast and non-blocking.
 * <info>	An arbitrary pointer to pass to the function.
 * <wait>	If true, wait (atomically) until function
 *		has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function_mask(cpumask_t mask,
			   void (*func)(void *), void *info,
			   int wait)
{
	struct call_data_struct data;
	cpumask_t allbutself;
	int cpus;

	spin_lock(&call_lock);
	allbutself = cpu_online_map;
	cpu_clear(smp_processor_id(), allbutself);

	cpus_and(mask, mask, allbutself);
	cpus = cpus_weight(mask);
	if (!cpus) {
		spin_unlock(&call_lock);
		return 0;
	}

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	call_data = &data;
	mb(); /* ensure store to call_data precedes setting of IPI_CALL_FUNC */

	/* Send a message to other CPUs */
	if (cpus_equal(mask, allbutself))
		send_IPI_allbutself(IPI_CALL_FUNC);
	else
		send_IPI_mask(mask, IPI_CALL_FUNC);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			cpu_relax();
	call_data = NULL;

	spin_unlock(&call_lock);
	return 0;
}
EXPORT_SYMBOL(smp_call_function_mask);

/*
 * this function sends a 'generic call function' IPI to all other CPUs
 * in the system.
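A minimal usage sketch of the smp_call_function_mask() interface added above. The helper names and the counter below are illustrative only and are not part of this patch; the constraints come from the comment block in the diff: the mask must exclude the current CPU, the callback must be fast and non-blocking, and the call must not be made with interrupts disabled or from interrupt or bottom-half context.

#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/preempt.h>
#include <asm/atomic.h>

static atomic_t responders = ATOMIC_INIT(0);

/* Runs on each CPU in the mask from the IPI handler; must not sleep. */
static void count_responder(void *info)
{
	atomic_inc(&responders);
}

/* Hypothetical caller: ping every other online CPU and wait for completion. */
static int ping_other_cpus(void)
{
	cpumask_t targets = cpu_online_map;
	int ret;

	preempt_disable();			/* keep smp_processor_id() stable */
	cpu_clear(smp_processor_id(), targets);	/* mask must not include the current cpu */
	ret = smp_call_function_mask(targets, count_responder, NULL, 1 /* wait */);
	preempt_enable();

	return ret ? ret : atomic_read(&responders);
}

With wait set, smp_call_function_mask() does not return until count_responder() has finished on every targeted CPU, so the counter already reflects all responders when it is read.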