Merge branch 'x86/cpu' into perf/core
Merge this branch because we changed the wrmsr*_safe() API and there's
a conflict.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
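The conflict stems from two renames meeting in these files: checking_wrmsrl() becoming wrmsrl_safe(), and the *_safe_regs() helpers losing their native_ prefix. As a quick orientation, here is a minimal caller-side sketch of the API after the merge, modeled on the init_amd() TOPOEXT hunk below (the MSR index 0xc0011005 and bit 54 are taken from that hunk); both helpers return 0 or -EFAULT:

	u64 val;

	/* was rdmsrl_amd_safe() / checking_wrmsrl() before the rename */
	if (!rdmsrl_safe(0xc0011005, &val)) {
		val |= 1ULL << 54;
		wrmsrl_safe(0xc0011005, val);
	}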
@@ -115,8 +115,8 @@ notrace static inline int native_write_msr_safe(unsigned int msr,
 
 extern unsigned long long native_read_tsc(void);
 
-extern int native_rdmsr_safe_regs(u32 regs[8]);
-extern int native_wrmsr_safe_regs(u32 regs[8]);
+extern int rdmsr_safe_regs(u32 regs[8]);
+extern int wrmsr_safe_regs(u32 regs[8]);
 
 static __always_inline unsigned long long __native_read_tsc(void)
 {
@@ -187,43 +187,6 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
 	return err;
 }
 
-static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
-{
-	u32 gprs[8] = { 0 };
-	int err;
-
-	gprs[1] = msr;
-	gprs[7] = 0x9c5a203a;
-
-	err = native_rdmsr_safe_regs(gprs);
-
-	*p = gprs[0] | ((u64)gprs[2] << 32);
-
-	return err;
-}
-
-static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
-{
-	u32 gprs[8] = { 0 };
-
-	gprs[0] = (u32)val;
-	gprs[1] = msr;
-	gprs[2] = val >> 32;
-	gprs[7] = 0x9c5a203a;
-
-	return native_wrmsr_safe_regs(gprs);
-}
-
-static inline int rdmsr_safe_regs(u32 regs[8])
-{
-	return native_rdmsr_safe_regs(regs);
-}
-
-static inline int wrmsr_safe_regs(u32 regs[8])
-{
-	return native_wrmsr_safe_regs(regs);
-}
-
 #define rdtscl(low) \
 	((low) = (u32)__native_read_tsc())
 
@@ -250,8 +213,7 @@ do { \
 
 #endif /* !CONFIG_PARAVIRT */
 
-
-#define checking_wrmsrl(msr, val) wrmsr_safe((msr), (u32)(val), \
+#define wrmsrl_safe(msr, val) wrmsr_safe((msr), (u32)(val), \
 					     (u32)((val) >> 32))
 
 #define write_tsc(val1, val2) wrmsr(MSR_IA32_TSC, (val1), (val2))
@@ -128,21 +128,11 @@ static inline u64 paravirt_read_msr(unsigned msr, int *err)
 	return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
 }
 
-static inline int paravirt_rdmsr_regs(u32 *regs)
-{
-	return PVOP_CALL1(int, pv_cpu_ops.rdmsr_regs, regs);
-}
-
 static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
 {
 	return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
 }
 
-static inline int paravirt_wrmsr_regs(u32 *regs)
-{
-	return PVOP_CALL1(int, pv_cpu_ops.wrmsr_regs, regs);
-}
-
 /* These should all do BUG_ON(_err), but our headers are too tangled. */
 #define rdmsr(msr, val1, val2) \
 do { \
@@ -176,9 +166,6 @@ do { \
 	_err; \
 })
 
-#define rdmsr_safe_regs(regs)	paravirt_rdmsr_regs(regs)
-#define wrmsr_safe_regs(regs)	paravirt_wrmsr_regs(regs)
-
 static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
 {
 	int err;
@@ -186,32 +173,6 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
 	*p = paravirt_read_msr(msr, &err);
 	return err;
 }
-static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
-{
-	u32 gprs[8] = { 0 };
-	int err;
-
-	gprs[1] = msr;
-	gprs[7] = 0x9c5a203a;
-
-	err = paravirt_rdmsr_regs(gprs);
-
-	*p = gprs[0] | ((u64)gprs[2] << 32);
-
-	return err;
-}
-
-static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
-{
-	u32 gprs[8] = { 0 };
-
-	gprs[0] = (u32)val;
-	gprs[1] = msr;
-	gprs[2] = val >> 32;
-	gprs[7] = 0x9c5a203a;
-
-	return paravirt_wrmsr_regs(gprs);
-}
 
 static inline u64 paravirt_read_tsc(void)
 {
@@ -153,9 +153,7 @@ struct pv_cpu_ops {
 	/* MSR, PMC and TSR operations.
 	   err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */
 	u64 (*read_msr)(unsigned int msr, int *err);
-	int (*rdmsr_regs)(u32 *regs);
 	int (*write_msr)(unsigned int msr, unsigned low, unsigned high);
-	int (*wrmsr_regs)(u32 *regs);
 
 	u64 (*read_tsc)(void);
 	u64 (*read_pmc)(int counter);
@@ -19,6 +19,39 @@
 
 #include "cpu.h"
 
+static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
+{
+	struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
+	u32 gprs[8] = { 0 };
+	int err;
+
+	WARN_ONCE((c->x86 != 0xf), "%s should only be used on K8!\n", __func__);
+
+	gprs[1] = msr;
+	gprs[7] = 0x9c5a203a;
+
+	err = rdmsr_safe_regs(gprs);
+
+	*p = gprs[0] | ((u64)gprs[2] << 32);
+
+	return err;
+}
+
+static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
+{
+	struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
+	u32 gprs[8] = { 0 };
+
+	WARN_ONCE((c->x86 != 0xf), "%s should only be used on K8!\n", __func__);
+
+	gprs[0] = (u32)val;
+	gprs[1] = msr;
+	gprs[2] = val >> 32;
+	gprs[7] = 0x9c5a203a;
+
+	return wrmsr_safe_regs(gprs);
+}
+
 #ifdef CONFIG_X86_32
 /*
  * B step AMD K6 before B 9730xxxx have hardware bugs that can cause
@@ -586,9 +619,9 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 	    !cpu_has(c, X86_FEATURE_TOPOEXT)) {
 		u64 val;
 
-		if (!rdmsrl_amd_safe(0xc0011005, &val)) {
+		if (!rdmsrl_safe(0xc0011005, &val)) {
 			val |= 1ULL << 54;
-			wrmsrl_amd_safe(0xc0011005, val);
+			wrmsrl_safe(0xc0011005, val);
 			rdmsrl(0xc0011005, val);
 			if (val & (1ULL << 54)) {
 				set_cpu_cap(c, X86_FEATURE_TOPOEXT);
@@ -679,7 +712,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 		err = rdmsrl_safe(MSR_AMD64_MCx_MASK(4), &mask);
 		if (err == 0) {
 			mask |= (1 << 10);
-			checking_wrmsrl(MSR_AMD64_MCx_MASK(4), mask);
+			wrmsrl_safe(MSR_AMD64_MCx_MASK(4), mask);
 		}
 	}
 
@@ -947,7 +947,7 @@ static void __cpuinit __print_cpu_msr(void)
 		index_max = msr_range_array[i].max;
 
 		for (index = index_min; index < index_max; index++) {
-			if (rdmsrl_amd_safe(index, &val))
+			if (rdmsrl_safe(index, &val))
 				continue;
 			printk(KERN_INFO " MSR%08x: %016llx\n", index, val);
 		}
@@ -211,7 +211,7 @@ static bool check_hw_exists(void)
 	 * that don't trap on the MSR access and always return 0s.
 	 */
 	val = 0xabcdUL;
-	ret = checking_wrmsrl(x86_pmu_event_addr(0), val);
+	ret = wrmsrl_safe(x86_pmu_event_addr(0), val);
 	ret |= rdmsrl_safe(x86_pmu_event_addr(0), &val_new);
 	if (ret || val != val_new)
 		goto msr_fail;
@@ -1003,11 +1003,11 @@ static void intel_pmu_reset(void)
 	printk("clearing PMU state on CPU#%d\n", smp_processor_id());
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
-		checking_wrmsrl(x86_pmu_config_addr(idx), 0ull);
-		checking_wrmsrl(x86_pmu_event_addr(idx), 0ull);
+		wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
+		wrmsrl_safe(x86_pmu_event_addr(idx), 0ull);
 	}
 	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
-		checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
+		wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
 
 	if (ds)
 		ds->bts_index = ds->bts_buffer_base;
@@ -895,8 +895,8 @@ static void p4_pmu_disable_pebs(void)
 	 * So at moment let leave metrics turned on forever -- it's
 	 * ok for now but need to be revisited!
 	 *
-	 * (void)checking_wrmsrl(MSR_IA32_PEBS_ENABLE, (u64)0);
-	 * (void)checking_wrmsrl(MSR_P4_PEBS_MATRIX_VERT, (u64)0);
+	 * (void)wrmsrl_safe(MSR_IA32_PEBS_ENABLE, (u64)0);
+	 * (void)wrmsrl_safe(MSR_P4_PEBS_MATRIX_VERT, (u64)0);
 	 */
 }
 
@@ -909,7 +909,7 @@ static inline void p4_pmu_disable_event(struct perf_event *event)
 	 * state we need to clear P4_CCCR_OVF, otherwise interrupt get
 	 * asserted again and again
 	 */
-	(void)checking_wrmsrl(hwc->config_base,
+	(void)wrmsrl_safe(hwc->config_base,
 		(u64)(p4_config_unpack_cccr(hwc->config)) &
 			~P4_CCCR_ENABLE & ~P4_CCCR_OVF & ~P4_CCCR_RESERVED);
 }
@@ -943,8 +943,8 @@ static void p4_pmu_enable_pebs(u64 config)
 
 	bind = &p4_pebs_bind_map[idx];
 
-	(void)checking_wrmsrl(MSR_IA32_PEBS_ENABLE, (u64)bind->metric_pebs);
-	(void)checking_wrmsrl(MSR_P4_PEBS_MATRIX_VERT, (u64)bind->metric_vert);
+	(void)wrmsrl_safe(MSR_IA32_PEBS_ENABLE, (u64)bind->metric_pebs);
+	(void)wrmsrl_safe(MSR_P4_PEBS_MATRIX_VERT, (u64)bind->metric_vert);
 }
 
 static void p4_pmu_enable_event(struct perf_event *event)
@@ -978,8 +978,8 @@ static void p4_pmu_enable_event(struct perf_event *event)
 	 */
 	p4_pmu_enable_pebs(hwc->config);
 
-	(void)checking_wrmsrl(escr_addr, escr_conf);
-	(void)checking_wrmsrl(hwc->config_base,
+	(void)wrmsrl_safe(escr_addr, escr_conf);
+	(void)wrmsrl_safe(hwc->config_base,
 			(cccr & ~P4_CCCR_RESERVED) | P4_CCCR_ENABLE);
 }
 
@@ -71,7 +71,7 @@ p6_pmu_disable_event(struct perf_event *event)
 	if (cpuc->enabled)
 		val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 
-	(void)checking_wrmsrl(hwc->config_base, val);
+	(void)wrmsrl_safe(hwc->config_base, val);
 }
 
 static void p6_pmu_enable_event(struct perf_event *event)
@@ -84,7 +84,7 @@ static void p6_pmu_enable_event(struct perf_event *event)
 	if (cpuc->enabled)
 		val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 
-	(void)checking_wrmsrl(hwc->config_base, val);
+	(void)wrmsrl_safe(hwc->config_base, val);
 }
 
 PMU_FORMAT_ATTR(event, "config:0-7" );
@@ -352,9 +352,7 @@ struct pv_cpu_ops pv_cpu_ops = {
 #endif
 	.wbinvd = native_wbinvd,
 	.read_msr = native_read_msr_safe,
-	.rdmsr_regs = native_rdmsr_safe_regs,
 	.write_msr = native_write_msr_safe,
-	.wrmsr_regs = native_wrmsr_safe_regs,
 	.read_tsc = native_read_tsc,
 	.read_pmc = native_read_pmc,
 	.read_tscp = native_read_tscp,
@@ -466,7 +466,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
 		task->thread.gs = addr;
 		if (doit) {
 			load_gs_index(0);
-			ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
+			ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, addr);
 		}
 	}
 	put_cpu();
@@ -494,7 +494,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
 			/* set the selector to 0 to not confuse
 			   __switch_to */
 			loadsegment(fs, 0);
-			ret = checking_wrmsrl(MSR_FS_BASE, addr);
+			ret = wrmsrl_safe(MSR_FS_BASE, addr);
 		}
 	}
 	put_cpu();
@@ -1,5 +1,5 @@
 #include <linux/module.h>
 #include <asm/msr.h>
 
-EXPORT_SYMBOL(native_rdmsr_safe_regs);
-EXPORT_SYMBOL(native_wrmsr_safe_regs);
+EXPORT_SYMBOL(rdmsr_safe_regs);
+EXPORT_SYMBOL(wrmsr_safe_regs);
@@ -6,13 +6,13 @@
 
 #ifdef CONFIG_X86_64
 /*
- * int native_{rdmsr,wrmsr}_safe_regs(u32 gprs[8]);
+ * int {rdmsr,wrmsr}_safe_regs(u32 gprs[8]);
  *
  * reg layout: u32 gprs[eax, ecx, edx, ebx, esp, ebp, esi, edi]
  *
  */
 .macro op_safe_regs op
-ENTRY(native_\op\()_safe_regs)
+ENTRY(\op\()_safe_regs)
 	CFI_STARTPROC
 	pushq_cfi %rbx
 	pushq_cfi %rbp
@@ -45,13 +45,13 @@ ENTRY(native_\op\()_safe_regs)
 
 	_ASM_EXTABLE(1b, 3b)
 	CFI_ENDPROC
-ENDPROC(native_\op\()_safe_regs)
+ENDPROC(\op\()_safe_regs)
 .endm
 
 #else /* X86_32 */
 
 .macro op_safe_regs op
-ENTRY(native_\op\()_safe_regs)
+ENTRY(\op\()_safe_regs)
 	CFI_STARTPROC
 	pushl_cfi %ebx
 	pushl_cfi %ebp
@@ -92,7 +92,7 @@ ENTRY(native_\op\()_safe_regs)
 
 	_ASM_EXTABLE(1b, 3b)
 	CFI_ENDPROC
-ENDPROC(native_\op\()_safe_regs)
+ENDPROC(\op\()_safe_regs)
 .endm
 
 #endif
@@ -205,9 +205,9 @@ void syscall32_cpu_init(void)
 {
 	/* Load these always in case some future AMD CPU supports
 	   SYSENTER from compat mode too. */
-	checking_wrmsrl(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
-	checking_wrmsrl(MSR_IA32_SYSENTER_ESP, 0ULL);
-	checking_wrmsrl(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);
+	wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
+	wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
+	wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);
 
 	wrmsrl(MSR_CSTAR, ia32_cstar_target);
 }
@@ -1124,9 +1124,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
 	.wbinvd = native_wbinvd,
 
 	.read_msr = native_read_msr_safe,
-	.rdmsr_regs = native_rdmsr_safe_regs,
 	.write_msr = xen_write_msr_safe,
-	.wrmsr_regs = native_wrmsr_safe_regs,
 
 	.read_tsc = native_read_tsc,
 	.read_pmc = native_read_pmc,