Merge branch 'kvm-updates/3.2' of git://git.kernel.org/pub/scm/virt/kvm/kvm
* 'kvm-updates/3.2' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM guest: prevent tracing recursion with kvmclock
  Revert "KVM: PPC: Add support for explicit HIOR setting"
  KVM: VMX: Check for automatic switch msr table overflow
  KVM: VMX: Add support for guest/host-only profiling
  KVM: VMX: add support for switching of PERF_GLOBAL_CTRL
  KVM: s390: announce SYNC_MMU
  KVM: s390: Fix tprot locking
  KVM: s390: handle SIGP sense running intercepts
  KVM: s390: Fix RUNNING flag misinterpretation
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -74,9 +74,10 @@ static cycle_t kvm_clock_read(void)
 	struct pvclock_vcpu_time_info *src;
 	cycle_t ret;
 
-	src = &get_cpu_var(hv_clock);
+	preempt_disable_notrace();
+	src = &__get_cpu_var(hv_clock);
 	ret = pvclock_clocksource_read(src);
-	put_cpu_var(hv_clock);
+	preempt_enable_notrace();
 	return ret;
 }
 
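Why the _notrace variants: kvm_clock_read() can be the very clocksource that ftrace uses to timestamp events, and get_cpu_var()/put_cpu_var() expand to preempt_disable()/preempt_enable(), which are themselves traced (notably with CONFIG_DEBUG_PREEMPT), so taking a timestamp from inside the tracer recurses. Below is an annotated copy of the function as it reads after this hunk; the comments are added here for context and are not part of the commit:

static cycle_t kvm_clock_read(void)
{
	struct pvclock_vcpu_time_info *src;
	cycle_t ret;

	/*
	 * Open-coded get_cpu_var(): the plain preempt_disable() would be
	 * traced, and tracing from the clock that supplies trace
	 * timestamps recurses indefinitely.  The _notrace variants only
	 * adjust the preempt count.
	 */
	preempt_disable_notrace();
	src = &__get_cpu_var(hv_clock);
	ret = pvclock_clocksource_read(src);
	preempt_enable_notrace();
	return ret;
}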
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -39,6 +39,7 @@
 #include <asm/mce.h>
 #include <asm/i387.h>
 #include <asm/xcr.h>
+#include <asm/perf_event.h>
 
 #include "trace.h"
 
@@ -118,7 +119,7 @@ module_param(ple_gap, int, S_IRUGO);
 static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
 module_param(ple_window, int, S_IRUGO);
 
-#define NR_AUTOLOAD_MSRS 1
+#define NR_AUTOLOAD_MSRS 8
 #define VMCS02_POOL_SIZE 1
 
 struct vmcs {
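NR_AUTOLOAD_MSRS sizes the per-vcpu MSR autoload tables that the atomic-switch helpers below fill in. Those tables are declared elsewhere in vmx.c and asm/vmx.h and are not part of this diff; for orientation they look roughly like this (field names per the 3.2-era sources, quoted from memory):

/* Format dictated by the VMX MSR-load/store area: MSR index, a reserved
 * word, then the 64-bit value the CPU loads on entry/exit. */
struct vmx_msr_entry {
	u32 index;
	u32 reserved;
	u64 value;
} __aligned(16);

/* Inside struct vcpu_vmx: one guest-side and one host-side table plus a
 * shared count.  With this patch the tables grow from 1 entry to 8. */
struct msr_autoload {
	unsigned nr;
	struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
	struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];
} msr_autoload;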
@@ -622,6 +623,7 @@ static unsigned long *vmx_msr_bitmap_legacy;
 static unsigned long *vmx_msr_bitmap_longmode;
 
 static bool cpu_has_load_ia32_efer;
+static bool cpu_has_load_perf_global_ctrl;
 
 static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
 static DEFINE_SPINLOCK(vmx_vpid_lock);
@@ -1191,15 +1193,34 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
 	vmcs_write32(EXCEPTION_BITMAP, eb);
 }
 
+static void clear_atomic_switch_msr_special(unsigned long entry,
+		unsigned long exit)
+{
+	vmcs_clear_bits(VM_ENTRY_CONTROLS, entry);
+	vmcs_clear_bits(VM_EXIT_CONTROLS, exit);
+}
+
 static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
 {
 	unsigned i;
 	struct msr_autoload *m = &vmx->msr_autoload;
 
-	if (msr == MSR_EFER && cpu_has_load_ia32_efer) {
-		vmcs_clear_bits(VM_ENTRY_CONTROLS, VM_ENTRY_LOAD_IA32_EFER);
-		vmcs_clear_bits(VM_EXIT_CONTROLS, VM_EXIT_LOAD_IA32_EFER);
-		return;
+	switch (msr) {
+	case MSR_EFER:
+		if (cpu_has_load_ia32_efer) {
+			clear_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
+					VM_EXIT_LOAD_IA32_EFER);
+			return;
+		}
+		break;
+	case MSR_CORE_PERF_GLOBAL_CTRL:
+		if (cpu_has_load_perf_global_ctrl) {
+			clear_atomic_switch_msr_special(
+					VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
+					VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
+			return;
+		}
+		break;
 	}
 
 	for (i = 0; i < m->nr; ++i)
@@ -1215,25 +1236,55 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
 	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
 }
 
+static void add_atomic_switch_msr_special(unsigned long entry,
+		unsigned long exit, unsigned long guest_val_vmcs,
+		unsigned long host_val_vmcs, u64 guest_val, u64 host_val)
+{
+	vmcs_write64(guest_val_vmcs, guest_val);
+	vmcs_write64(host_val_vmcs, host_val);
+	vmcs_set_bits(VM_ENTRY_CONTROLS, entry);
+	vmcs_set_bits(VM_EXIT_CONTROLS, exit);
+}
+
 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
 				  u64 guest_val, u64 host_val)
 {
 	unsigned i;
 	struct msr_autoload *m = &vmx->msr_autoload;
 
-	if (msr == MSR_EFER && cpu_has_load_ia32_efer) {
-		vmcs_write64(GUEST_IA32_EFER, guest_val);
-		vmcs_write64(HOST_IA32_EFER, host_val);
-		vmcs_set_bits(VM_ENTRY_CONTROLS, VM_ENTRY_LOAD_IA32_EFER);
-		vmcs_set_bits(VM_EXIT_CONTROLS, VM_EXIT_LOAD_IA32_EFER);
-		return;
+	switch (msr) {
+	case MSR_EFER:
+		if (cpu_has_load_ia32_efer) {
+			add_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
+					VM_EXIT_LOAD_IA32_EFER,
+					GUEST_IA32_EFER,
+					HOST_IA32_EFER,
+					guest_val, host_val);
+			return;
+		}
+		break;
+	case MSR_CORE_PERF_GLOBAL_CTRL:
+		if (cpu_has_load_perf_global_ctrl) {
+			add_atomic_switch_msr_special(
+					VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
+					VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
+					GUEST_IA32_PERF_GLOBAL_CTRL,
+					HOST_IA32_PERF_GLOBAL_CTRL,
+					guest_val, host_val);
+			return;
+		}
+		break;
 	}
 
 	for (i = 0; i < m->nr; ++i)
 		if (m->guest[i].index == msr)
 			break;
 
-	if (i == m->nr) {
+	if (i == NR_AUTOLOAD_MSRS) {
+		printk_once(KERN_WARNING"Not enough msr switch entries. "
+				"Can't add msr %x\n", msr);
+		return;
+	} else if (i == m->nr) {
 		++m->nr;
 		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
 		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
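For context, the msr_autoload guest/host tables (sketched after the NR_AUTOLOAD_MSRS hunk above) are handed to the CPU once at vcpu setup time via the VMCS MSR-load address fields, and the *_MSR_LOAD_COUNT fields written here tell the CPU how many entries are live. That setup code lives elsewhere in vmx.c and is not part of this diff; a rough sketch of it, quoted from memory:

	/* In vmx_vcpu_setup(): point the VMCS at the autoload arrays.
	 * The counts start at 0 and are bumped by add_atomic_switch_msr(). */
	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
	vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
	vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));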
@@ -2455,6 +2506,42 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
 		&& allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
 				   VM_EXIT_LOAD_IA32_EFER);
 
+	cpu_has_load_perf_global_ctrl =
+		allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS,
+				VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
+		&& allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
+				VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
+
+	/*
+	 * Some cpus support VM_ENTRY_(LOAD|SAVE)_IA32_PERF_GLOBAL_CTRL
+	 * but due to errata below it can't be used. Workaround is to use
+	 * msr load mechanism to switch IA32_PERF_GLOBAL_CTRL.
+	 *
+	 * VM Exit May Incorrectly Clear IA32_PERF_GLOBAL_CTRL [34:32]
+	 *
+	 * AAK155             (model 26)
+	 * AAP115             (model 30)
+	 * AAT100             (model 37)
+	 * BC86,AAY89,BD102   (model 44)
+	 * BA97               (model 46)
+	 *
+	 */
+	if (cpu_has_load_perf_global_ctrl && boot_cpu_data.x86 == 0x6) {
+		switch (boot_cpu_data.x86_model) {
+		case 26:
+		case 30:
+		case 37:
+		case 44:
+		case 46:
+			cpu_has_load_perf_global_ctrl = false;
+			printk_once(KERN_WARNING"kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL "
+					"does not work properly. Using workaround\n");
+			break;
+		default:
+			break;
+		}
+	}
+
 	return 0;
 }
 
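The allow_1_setting() helper used above is defined earlier in vmx.c and is not part of this diff; it simply checks whether the relevant VMX capability MSR allows the given control bit to be 1 (the allowed-1 settings live in the high 32 bits of those MSRs). Roughly, and quoted from memory:

static __init bool allow_1_setting(u32 msr, u32 ctl)
{
	u32 vmx_msr_low, vmx_msr_high;

	/* High half of IA32_VMX_*_CTLS reports the allowed-1 settings. */
	rdmsr(msr, vmx_msr_low, vmx_msr_high);

	return vmx_msr_high & ctl;
}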
@@ -5968,6 +6055,24 @@ static void vmx_cancel_injection(struct kvm_vcpu *vcpu)
 	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
 }
 
+static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
+{
+	int i, nr_msrs;
+	struct perf_guest_switch_msr *msrs;
+
+	msrs = perf_guest_get_msrs(&nr_msrs);
+
+	if (!msrs)
+		return;
+
+	for (i = 0; i < nr_msrs; i++)
+		if (msrs[i].host == msrs[i].guest)
+			clear_atomic_switch_msr(vmx, msrs[i].msr);
+		else
+			add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest,
+					msrs[i].host);
+}
+
 #ifdef CONFIG_X86_64
 #define R "r"
 #define Q "q"
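perf_guest_get_msrs() is the hook exported by the x86 perf core for exactly this guest/host-only profiling case (declared in asm/perf_event.h, pulled in by the new include above). It is not part of this diff; its interface is roughly the following, quoted from memory:

/* One entry per MSR whose value must differ between host and guest
 * context; for Intel PMUs this is primarily MSR_CORE_PERF_GLOBAL_CTRL,
 * masked so host-only events stay enabled on the host side and
 * guest-only events on the guest side.  Non-supporting PMUs return NULL,
 * hence the !msrs check above. */
struct perf_guest_switch_msr {
	unsigned msr;
	u64 host, guest;
};

extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);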
@@ -6017,6 +6122,8 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
 		vmx_set_interrupt_shadow(vcpu, 0);
 
+	atomic_switch_perf_msrs(vmx);
+
 	vmx->__launched = vmx->loaded_vmcs->launched;
 	asm(
 		/* Store host registers */