xen: implement save/restore
This patch implements Xen save/restore and migration.

Saving is triggered via xenbus, which is polled in
drivers/xen/manage.c.  When a suspend request comes in, the kernel
prepares itself for saving by:

1 - Freezing all processes.  This is primarily to prevent any
    partially-completed pagetable updates from confusing the suspend
    process.  If CONFIG_PREEMPT isn't defined, then this isn't
    necessary.
2 - Suspending xenbus and other devices.
3 - Calling stop_machine, to make sure all the other vcpus are
    quiescent.  The Xen tools require the domain to run its save off
    vcpu0.
4 - Within the stop_machine state, pinning any unpinned pgds (under
    construction or destruction), canonicalizing various other pieces
    of state (mostly converting mfns to pfns), and finally
5 - Suspending the domain.

Restore reverses the steps used to save the domain, ending when all
the frozen processes are thawed.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
commit 0e91398f2a
parent 7d88d32a46
committed by Thomas Gleixner
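To make the sequence above concrete, here is a hedged sketch of how such a suspend path can be structured.  This is an illustration of the flow described in the commit message, not the code this patch adds to drivers/xen/manage.c: the helpers xen_pre_suspend(), xen_post_suspend() and xen_suspend_domain() are placeholder names, and the stop_machine_run() interface shown is the one available in kernels of this era.

/*
 * Hedged sketch of the suspend sequence described in the commit message,
 * not the actual drivers/xen/manage.c code.  xen_pre_suspend(),
 * xen_post_suspend() and xen_suspend_domain() are placeholder names for
 * the canonicalization helpers and the suspend hypercall.
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/freezer.h>
#include <linux/stop_machine.h>
#include <xen/xenbus.h>
#include <xen/events.h>

/* Placeholders (illustrative names only). */
void xen_pre_suspend(void);                   /* pin pgds, mfn -> pfn fixups */
void xen_post_suspend(int suspend_cancelled); /* undo the canonicalization */
int xen_suspend_domain(void);                 /* issue the suspend hypercall */

/* Runs under stop_machine(), i.e. with every other vcpu quiesced. */
static int xen_suspend(void *data)
{
	int *cancelled = data;

	BUG_ON(!irqs_disabled());

	xen_pre_suspend();                    /* step 4 */
	*cancelled = xen_suspend_domain();    /* step 5; non-zero if cancelled */
	xen_post_suspend(*cancelled);

	if (!*cancelled)
		xen_irq_resume();             /* re-bind event channels (below) */

	return 0;
}

static void do_suspend(void)
{
	int err, cancelled = 1;

	err = freeze_processes();             /* step 1 */
	if (err)
		return;

	xenbus_suspend();                     /* step 2 */

	/* steps 3-5: the Xen tools require the save to run on vcpu0 */
	err = stop_machine_run(xen_suspend, &cancelled, 0);
	if (err)
		printk(KERN_ERR "xen suspend: stop_machine failed: %d\n", err);

	/* Restore reverses the steps and ends by thawing processes. */
	xenbus_resume();
	thaw_processes();
}

The xen_irq_resume() call in the sketch corresponds to the function added in the hunk below, which rebinds every VIRQ and IPI event channel after the domain comes back.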
@@ -674,6 +674,89 @@ static int retrigger_dynirq(unsigned int irq)
 	return ret;
 }
 
+static void restore_cpu_virqs(unsigned int cpu)
+{
+	struct evtchn_bind_virq bind_virq;
+	int virq, irq, evtchn;
+
+	for (virq = 0; virq < NR_VIRQS; virq++) {
+		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
+			continue;
+
+		BUG_ON(irq_info[irq].type != IRQT_VIRQ);
+		BUG_ON(irq_info[irq].index != virq);
+
+		/* Get a new binding from Xen. */
+		bind_virq.virq = virq;
+		bind_virq.vcpu = cpu;
+		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
+						&bind_virq) != 0)
+			BUG();
+		evtchn = bind_virq.port;
+
+		/* Record the new mapping. */
+		evtchn_to_irq[evtchn] = irq;
+		irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
+		bind_evtchn_to_cpu(evtchn, cpu);
+
+		/* Ready for use. */
+		unmask_evtchn(evtchn);
+	}
+}
+
+static void restore_cpu_ipis(unsigned int cpu)
+{
+	struct evtchn_bind_ipi bind_ipi;
+	int ipi, irq, evtchn;
+
+	for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
+		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
+			continue;
+
+		BUG_ON(irq_info[irq].type != IRQT_IPI);
+		BUG_ON(irq_info[irq].index != ipi);
+
+		/* Get a new binding from Xen. */
+		bind_ipi.vcpu = cpu;
+		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
+						&bind_ipi) != 0)
+			BUG();
+		evtchn = bind_ipi.port;
+
+		/* Record the new mapping. */
+		evtchn_to_irq[evtchn] = irq;
+		irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
+		bind_evtchn_to_cpu(evtchn, cpu);
+
+		/* Ready for use. */
+		unmask_evtchn(evtchn);
+
+	}
+}
+
+void xen_irq_resume(void)
+{
+	unsigned int cpu, irq, evtchn;
+
+	init_evtchn_cpu_bindings();
+
+	/* New event-channel space is not 'live' yet. */
+	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
+		mask_evtchn(evtchn);
+
+	/* No IRQ <-> event-channel mappings. */
+	for (irq = 0; irq < NR_IRQS; irq++)
+		irq_info[irq].evtchn = 0; /* zap event-channel binding */
+
+	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
+		evtchn_to_irq[evtchn] = -1;
+
+	for_each_possible_cpu(cpu) {
+		restore_cpu_virqs(cpu);
+		restore_cpu_ipis(cpu);
+	}
+}
+
 static struct irq_chip xen_dynamic_chip __read_mostly = {
 	.name		= "xen-dyn",
 	.mask		= disable_dynirq,