KVM: ioapic/msi interrupt delivery consolidation
ioapic_deliver() and kvm_set_msi() have code duplication. Move the code into
ioapic_deliver_entry() function and call it from both places.

Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
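For orientation before the patch itself, the sketch below shows the consolidation pattern the commit describes, reduced to plain C: both the IOAPIC path and the MSI path build a redirection-entry-style descriptor and hand it to one shared delivery helper. It is a minimal illustration only; the struct, helper names, and MSI field decoding are hypothetical stand-ins, not the KVM types or API changed in the hunks below.

/* Minimal sketch of the consolidation pattern (illustrative names only). */
#include <stdio.h>

struct redirect_entry {              /* stand-in for a redirection-table entry */
	unsigned int vector;
	unsigned int delivery_mode;
	unsigned int trig_mode;
};

/* Shared helper: both interrupt sources funnel through here, mirroring
 * the role ioapic_deliver_entry() plays in the patch. */
static int deliver_entry(const struct redirect_entry *e)
{
	/* real code would compute a destination vCPU bitmask and inject */
	printf("deliver vector=0x%x mode=%u trig=%u\n",
	       e->vector, e->delivery_mode, e->trig_mode);
	return 0;
}

/* IOAPIC path: the entry comes straight from the redirection table. */
static int ioapic_path(const struct redirect_entry *redirtbl, int irq)
{
	return deliver_entry(&redirtbl[irq]);
}

/* MSI path: the entry is synthesized from the MSI data word. */
static int msi_path(unsigned int msi_data)
{
	struct redirect_entry e = {
		.vector        = msi_data & 0xff,
		.delivery_mode = (msi_data >> 8) & 0x7,
		.trig_mode     = (msi_data >> 15) & 0x1,
	};
	return deliver_entry(&e);
}

int main(void)
{
	struct redirect_entry redirtbl[24] = { [0] = { .vector = 0x30 } };

	ioapic_path(redirtbl, 0);   /* pin-based interrupt */
	msi_path(0x4041);           /* message-signalled interrupt */
	return 0;
}

The point of the refactor, visible in the hunks below, is that the destination-bitmask computation and the per-vCPU injection loop end up in exactly one place.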
@@ -364,7 +364,7 @@ void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
 void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask);
 
 #ifdef __KVM_HAVE_IOAPIC
-void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
+void kvm_get_intr_delivery_bitmask(struct kvm *kvm,
 				   union kvm_ioapic_redirect_entry *entry,
 				   unsigned long *deliver_bitmask);
 #endif
@@ -142,12 +142,40 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
 	}
 }
 
+int ioapic_deliver_entry(struct kvm *kvm, union kvm_ioapic_redirect_entry *e)
+{
+	DECLARE_BITMAP(deliver_bitmask, KVM_MAX_VCPUS);
+	int i, r = -1;
+
+	kvm_get_intr_delivery_bitmask(kvm, e, deliver_bitmask);
+
+	if (find_first_bit(deliver_bitmask, KVM_MAX_VCPUS) >= KVM_MAX_VCPUS) {
+		ioapic_debug("no target on destination\n");
+		return r;
+	}
+
+	while ((i = find_first_bit(deliver_bitmask, KVM_MAX_VCPUS))
+			< KVM_MAX_VCPUS) {
+		struct kvm_vcpu *vcpu = kvm->vcpus[i];
+		__clear_bit(i, deliver_bitmask);
+		if (vcpu) {
+			if (r < 0)
+				r = 0;
+			r += kvm_apic_set_irq(vcpu, e->fields.vector,
+					e->fields.delivery_mode,
+					e->fields.trig_mode);
+		} else
+			ioapic_debug("null destination vcpu: "
+				     "mask=%x vector=%x delivery_mode=%x\n",
+				     e->fields.deliver_bitmask,
+				     e->fields.vector, e->fields.delivery_mode);
+	}
+	return r;
+}
+
 static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
 {
 	union kvm_ioapic_redirect_entry entry = ioapic->redirtbl[irq];
-	DECLARE_BITMAP(deliver_bitmask, KVM_MAX_VCPUS);
-	struct kvm_vcpu *vcpu;
-	int vcpu_id, r = -1;
 
 	ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
 		     "vector=%x trig_mode=%x\n",
@@ -155,39 +183,14 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
 		     entry.fields.delivery_mode, entry.fields.vector,
 		     entry.fields.trig_mode);
 
-	/* Always delivery PIT interrupt to vcpu 0 */
 #ifdef CONFIG_X86
+	/* Always delivery PIT interrupt to vcpu 0 */
 	if (irq == 0) {
-		bitmap_zero(deliver_bitmask, KVM_MAX_VCPUS);
-		__set_bit(0, deliver_bitmask);
-	} else
+		entry.fields.dest_mode = 0; /* Physical mode. */
+		entry.fields.dest_id = ioapic->kvm->vcpus[0]->vcpu_id;
+	}
 #endif
-		kvm_get_intr_delivery_bitmask(ioapic, &entry, deliver_bitmask);
-
-	if (find_first_bit(deliver_bitmask, KVM_MAX_VCPUS) >= KVM_MAX_VCPUS) {
-		ioapic_debug("no target on destination\n");
-		return 0;
-	}
-
-	while ((vcpu_id = find_first_bit(deliver_bitmask, KVM_MAX_VCPUS))
-			< KVM_MAX_VCPUS) {
-		__clear_bit(vcpu_id, deliver_bitmask);
-		vcpu = ioapic->kvm->vcpus[vcpu_id];
-		if (vcpu) {
-			if (r < 0)
-				r = 0;
-			r += kvm_apic_set_irq(vcpu,
-					entry.fields.vector,
-					entry.fields.trig_mode,
-					entry.fields.delivery_mode);
-		} else
-			ioapic_debug("null destination vcpu: "
-				     "mask=%x vector=%x delivery_mode=%x\n",
-				     entry.fields.deliver_bitmask,
-				     entry.fields.vector,
-				     entry.fields.delivery_mode);
-	}
-	return r;
+	return ioapic_deliver_entry(ioapic->kvm, &entry);
 }
 
 int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
@@ -70,8 +70,8 @@ void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode);
 int kvm_ioapic_init(struct kvm *kvm);
 int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level);
 void kvm_ioapic_reset(struct kvm_ioapic *ioapic);
-void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
+void kvm_get_intr_delivery_bitmask(struct kvm *kvm,
 				   union kvm_ioapic_redirect_entry *entry,
 				   unsigned long *deliver_bitmask);
+int ioapic_deliver_entry(struct kvm *kvm, union kvm_ioapic_redirect_entry *e);
 #endif
@@ -43,12 +43,11 @@ static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
 	return kvm_ioapic_set_irq(kvm->arch.vioapic, e->irqchip.pin, level);
 }
 
-void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
+void kvm_get_intr_delivery_bitmask(struct kvm *kvm,
 				   union kvm_ioapic_redirect_entry *entry,
 				   unsigned long *deliver_bitmask)
 {
 	int i;
-	struct kvm *kvm = ioapic->kvm;
 	struct kvm_vcpu *vcpu;
 
 	bitmap_zero(deliver_bitmask, KVM_MAX_VCPUS);
@@ -90,7 +89,7 @@ void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
 	switch (entry->fields.delivery_mode) {
 	case IOAPIC_LOWEST_PRIORITY:
 		/* Select one in deliver_bitmask */
-		vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm,
+		vcpu = kvm_get_lowest_prio_vcpu(kvm,
 				entry->fields.vector, deliver_bitmask);
 		bitmap_zero(deliver_bitmask, KVM_MAX_VCPUS);
 		if (!vcpu)
@@ -111,13 +110,7 @@ void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
 static int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
 		       struct kvm *kvm, int level)
 {
-	int vcpu_id, r = -1;
-	struct kvm_vcpu *vcpu;
-	struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);
 	union kvm_ioapic_redirect_entry entry;
-	DECLARE_BITMAP(deliver_bitmask, KVM_MAX_VCPUS);
 
-	BUG_ON(!ioapic);
-
 	entry.bits = 0;
 	entry.fields.dest_id = (e->msi.address_lo &
@@ -133,26 +126,7 @@ static int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
 			(unsigned long *)&e->msi.data);
 
 	/* TODO Deal with RH bit of MSI message address */
-
-	kvm_get_intr_delivery_bitmask(ioapic, &entry, deliver_bitmask);
-
-	if (find_first_bit(deliver_bitmask, KVM_MAX_VCPUS) >= KVM_MAX_VCPUS) {
-		printk(KERN_WARNING "kvm: no destination for MSI delivery!");
-		return -1;
-	}
-	while ((vcpu_id = find_first_bit(deliver_bitmask,
-					KVM_MAX_VCPUS)) < KVM_MAX_VCPUS) {
-		__clear_bit(vcpu_id, deliver_bitmask);
-		vcpu = ioapic->kvm->vcpus[vcpu_id];
-		if (vcpu) {
-			if (r < 0)
-				r = 0;
-			r += kvm_apic_set_irq(vcpu, entry.fields.vector,
-					      entry.fields.dest_mode,
-					      entry.fields.trig_mode);
-		}
-	}
-	return r;
+	return ioapic_deliver_entry(kvm, &entry);
 }
 
 /* This should be called with the kvm->lock mutex held