Merge branch 'x86/urgent' of into irq/sparseirq
Reason: Pull in the latest io_apic bugfixes

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
@@ -58,7 +58,19 @@ static void parse_earlyprintk(void)
        if (arg[pos] == ',')
                pos++;

        if (!strncmp(arg, "ttyS", 4)) {
        /*
         * make sure we have
         *      "serial,0x3f8,115200"
         *      "serial,ttyS0,115200"
         *      "ttyS0,115200"
         */
        if (pos == 7 && !strncmp(arg + pos, "0x", 2)) {
                port = simple_strtoull(arg + pos, &e, 16);
                if (port == 0 || arg + pos == e)
                        port = DEFAULT_SERIAL_PORT;
                else
                        pos = e - arg;
        } else if (!strncmp(arg + pos, "ttyS", 4)) {
                static const int bases[] = { 0x3f8, 0x2f8 };
                int idx = 0;
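The added branch accepts a raw hex I/O port ("serial,0x3f8,115200") in addition to a ttyS name, falling back to DEFAULT_SERIAL_PORT when the number does not parse. A minimal user-space sketch of the same dispatch, with strtoul standing in for the boot code's simple_strtoull and the port table reduced to the two bases shown above (the helper name and the main() driver are illustrative only):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DEFAULT_SERIAL_PORT 0x3f8       /* assumed fallback for this sketch */

/* Accepts "serial,0x3f8,115200", "serial,ttyS0,115200" or "ttyS0,115200". */
static unsigned long parse_port(const char *arg)
{
        static const unsigned long bases[] = { 0x3f8, 0x2f8 };
        size_t pos = 0;
        char *e;

        if (!strncmp(arg, "serial", 6))
                pos += 6;
        if (arg[pos] == ',')
                pos++;

        if (!strncmp(arg + pos, "0x", 2)) {
                unsigned long port = strtoul(arg + pos, &e, 16);

                return (port == 0 || arg + pos == e) ? DEFAULT_SERIAL_PORT : port;
        }
        if (!strncmp(arg + pos, "ttyS", 4))
                return bases[arg[pos + 4] == '1' ? 1 : 0];

        return DEFAULT_SERIAL_PORT;
}

int main(void)
{
        printf("0x%lx 0x%lx\n",
               parse_port("serial,0x3f8,115200"),
               parse_port("ttyS1,115200"));
        return 0;
}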
@@ -38,4 +38,10 @@ static inline void amd_iommu_stats_init(void) { }

#endif /* !CONFIG_AMD_IOMMU_STATS */

static inline bool is_rd890_iommu(struct pci_dev *pdev)
{
        return (pdev->vendor == PCI_VENDOR_ID_ATI) &&
               (pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
}

#endif /* _ASM_X86_AMD_IOMMU_PROTO_H */
@@ -368,6 +368,9 @@ struct amd_iommu {
        /* capabilities of that IOMMU read from ACPI */
        u32 cap;

        /* flags read from acpi table */
        u8 acpi_flags;

        /*
         * Capability pointer. There could be more than one IOMMU per PCI
         * device function if there are more than one AMD IOMMU capability
@@ -411,6 +414,15 @@ struct amd_iommu {

        /* default dma_ops domain for that IOMMU */
        struct dma_ops_domain *default_dom;

        /*
         * This array is required to work around a potential BIOS bug.
         * The BIOS may miss to restore parts of the PCI configuration
         * space when the system resumes from S3. The result is that the
         * IOMMU does not execute commands anymore which leads to system
         * failure.
         */
        u32 cache_cfg[4];
};

/*
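The hunks further down fill and replay this array: init_iommu_from_pci() reads PCI config dwords 0xf0-0xfc on RD890 IOMMUs into cache_cfg[], and iommu_apply_quirks() writes them back before the IOMMU is re-enabled, so the values survive a resume even when the BIOS drops them. A condensed view of that pairing (the loop form and the helper names are editorial; the actual patch unrolls the four accesses inline):

/* Save at probe time -- mirrors the reads added to init_iommu_from_pci(). */
static void rd890_save_cache_cfg(struct amd_iommu *iommu)
{
        int i;

        if (!is_rd890_iommu(iommu->dev))
                return;
        for (i = 0; i < 4; i++)
                pci_read_config_dword(iommu->dev, 0xf0 + 4 * i,
                                      &iommu->cache_cfg[i]);
}

/* Replay before (re-)enabling -- mirrors iommu_apply_quirks(). */
static void rd890_restore_cache_cfg(struct amd_iommu *iommu)
{
        int i;

        if (!is_rd890_iommu(iommu->dev))
                return;
        for (i = 0; i < 4; i++)
                pci_write_config_dword(iommu->dev, 0xf0 + 4 * i,
                                       iommu->cache_cfg[i]);
}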
@@ -309,7 +309,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
{
        return ((1UL << (nr % BITS_PER_LONG)) &
                (((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
                (addr[nr / BITS_PER_LONG])) != 0;
}

static inline int variable_test_bit(int nr, volatile const unsigned long *addr)
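The one-line change matters because the cast to plain unsigned long * discarded the volatile qualifier, which lets the compiler cache or hoist the load; indexing the volatile-qualified pointer directly keeps every call re-reading memory. A small stand-alone illustration of the fixed form (user space, BITS_PER_LONG defined locally for the example):

#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

/* Same shape as the fixed constant_test_bit(): no cast, volatile preserved. */
static int test_bit_volatile(unsigned int nr, const volatile unsigned long *addr)
{
        return ((1UL << (nr % BITS_PER_LONG)) &
                (addr[nr / BITS_PER_LONG])) != 0;
}

int main(void)
{
        volatile unsigned long flags[2] = { 0 };

        flags[1] |= 1UL << 3;   /* bit 67 on 64-bit, bit 35 on 32-bit */
        printf("%d %d\n",
               test_bit_volatile(0, flags),
               test_bit_volatile(BITS_PER_LONG + 3, flags));
        return 0;
}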
@@ -168,6 +168,7 @@
#define X86_FEATURE_XSAVEOPT    (7*32+ 4) /* Optimized Xsave */
#define X86_FEATURE_PLN         (7*32+ 5) /* Intel Power Limit Notification */
#define X86_FEATURE_PTS         (7*32+ 6) /* Intel Package Thermal Status */
#define X86_FEATURE_DTS         (7*32+ 7) /* Digital Thermal Sensor */

/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW  (8*32+ 0) /* Intel TPR Shadow */
@@ -20,7 +20,7 @@ struct arch_hw_breakpoint {
#include <linux/list.h>

/* Available HW breakpoint length encodings */
#define X86_BREAKPOINT_LEN_X            0x00
#define X86_BREAKPOINT_LEN_X            0x40
#define X86_BREAKPOINT_LEN_1            0x40
#define X86_BREAKPOINT_LEN_2            0x44
#define X86_BREAKPOINT_LEN_4            0x4c
@@ -11,6 +11,8 @@ ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_tsc.o = -pg
CFLAGS_REMOVE_rtc.o = -pg
CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
CFLAGS_REMOVE_pvclock.o = -pg
CFLAGS_REMOVE_kvmclock.o = -pg
CFLAGS_REMOVE_ftrace.o = -pg
CFLAGS_REMOVE_early_printk.o = -pg
endif
@@ -1953,6 +1953,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
                           size_t size,
                           int dir)
{
        dma_addr_t flush_addr;
        dma_addr_t i, start;
        unsigned int pages;

@@ -1960,6 +1961,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
            (dma_addr + size > dma_dom->aperture_size))
                return;

        flush_addr = dma_addr;
        pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
        dma_addr &= PAGE_MASK;
        start = dma_addr;
@@ -1974,7 +1976,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
        dma_ops_free_addresses(dma_dom, dma_addr, pages);

        if (amd_iommu_unmap_flush || dma_dom->need_flush) {
                iommu_flush_pages(&dma_dom->domain, dma_addr, size);
                iommu_flush_pages(&dma_dom->domain, flush_addr, size);
                dma_dom->need_flush = false;
        }
}
@@ -632,6 +632,13 @@ static void __init init_iommu_from_pci(struct amd_iommu *iommu)
        iommu->last_device = calc_devid(MMIO_GET_BUS(range),
                                        MMIO_GET_LD(range));
        iommu->evt_msi_num = MMIO_MSI_NUM(misc);

        if (is_rd890_iommu(iommu->dev)) {
                pci_read_config_dword(iommu->dev, 0xf0, &iommu->cache_cfg[0]);
                pci_read_config_dword(iommu->dev, 0xf4, &iommu->cache_cfg[1]);
                pci_read_config_dword(iommu->dev, 0xf8, &iommu->cache_cfg[2]);
                pci_read_config_dword(iommu->dev, 0xfc, &iommu->cache_cfg[3]);
        }
}

/*
@@ -649,29 +656,9 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
        struct ivhd_entry *e;

        /*
         * First set the recommended feature enable bits from ACPI
         * into the IOMMU control registers
         * First save the recommended feature enable bits from ACPI
         */
        h->flags & IVHD_FLAG_HT_TUN_EN_MASK ?
                iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
                iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);

        h->flags & IVHD_FLAG_PASSPW_EN_MASK ?
                iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
                iommu_feature_disable(iommu, CONTROL_PASSPW_EN);

        h->flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
                iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
                iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);

        h->flags & IVHD_FLAG_ISOC_EN_MASK ?
                iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
                iommu_feature_disable(iommu, CONTROL_ISOC_EN);

        /*
         * make IOMMU memory accesses cache coherent
         */
        iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
        iommu->acpi_flags = h->flags;

        /*
         * Done. Now parse the device entries
@@ -1116,6 +1103,40 @@ static void init_device_table(void)
        }
}

static void iommu_init_flags(struct amd_iommu *iommu)
{
        iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
                iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
                iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);

        iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
                iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
                iommu_feature_disable(iommu, CONTROL_PASSPW_EN);

        iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
                iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
                iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);

        iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
                iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
                iommu_feature_disable(iommu, CONTROL_ISOC_EN);

        /*
         * make IOMMU memory accesses cache coherent
         */
        iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
}

static void iommu_apply_quirks(struct amd_iommu *iommu)
{
        if (is_rd890_iommu(iommu->dev)) {
                pci_write_config_dword(iommu->dev, 0xf0, iommu->cache_cfg[0]);
                pci_write_config_dword(iommu->dev, 0xf4, iommu->cache_cfg[1]);
                pci_write_config_dword(iommu->dev, 0xf8, iommu->cache_cfg[2]);
                pci_write_config_dword(iommu->dev, 0xfc, iommu->cache_cfg[3]);
        }
}

/*
 * This function finally enables all IOMMUs found in the system after
 * they have been initialized
@@ -1126,6 +1147,8 @@ static void enable_iommus(void)

        for_each_iommu(iommu) {
                iommu_disable(iommu);
                iommu_apply_quirks(iommu);
                iommu_init_flags(iommu);
                iommu_set_device_table(iommu);
                iommu_enable_command_buffer(iommu);
                iommu_enable_event_buffer(iommu);
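The ordering above is the point of the change: the quirk registers are rewritten and the ACPI-recommended control bits re-applied while the IOMMU is still disabled, before the device table and buffers are programmed. Assuming the resume path re-runs enable_iommus(), the configuration saved at boot is also restored after S3. A condensed per-IOMMU view of the sequence (the helper name is editorial; the tail of the loop is assumed to continue with the existing enable step):

static void bring_up_one_iommu(struct amd_iommu *iommu)
{
        iommu_disable(iommu);           /* quiesce first                   */
        iommu_apply_quirks(iommu);      /* replay saved RD890 config space */
        iommu_init_flags(iommu);        /* re-apply ACPI-recommended bits  */
        iommu_set_device_table(iommu);
        iommu_enable_command_buffer(iommu);
        iommu_enable_event_buffer(iommu);
        /* ... existing code then enables the IOMMU itself. */
}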
@@ -306,14 +306,19 @@ void arch_init_copy_chip_data(struct irq_desc *old_desc,

        old_cfg = old_desc->chip_data;

        memcpy(cfg, old_cfg, sizeof(struct irq_cfg));
        cfg->vector = old_cfg->vector;
        cfg->move_in_progress = old_cfg->move_in_progress;
        cpumask_copy(cfg->domain, old_cfg->domain);
        cpumask_copy(cfg->old_domain, old_cfg->old_domain);

        init_copy_irq_2_pin(old_cfg, cfg, node);
}

static void free_irq_cfg(struct irq_cfg *old_cfg)
static void free_irq_cfg(struct irq_cfg *cfg)
{
        kfree(old_cfg);
        free_cpumask_var(cfg->domain);
        free_cpumask_var(cfg->old_domain);
        kfree(cfg);
}

void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc)
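The memcpy() and bare kfree() were wrong because, with CONFIG_CPUMASK_OFFSTACK, cpumask_var_t is a pointer to a separately allocated mask: copying the struct wholesale makes the new cfg alias the old masks, and freeing only the struct leaks them. A user-space miniature of the same pitfall (the struct and helpers are illustrative, not the kernel types):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for irq_cfg when cpumask_var_t is a pointer (OFFSTACK case). */
struct cfg {
        int vector;
        unsigned long *domain;          /* separately allocated mask */
};

static struct cfg *cfg_alloc(void)
{
        struct cfg *c = calloc(1, sizeof(*c));

        c->domain = calloc(1, sizeof(unsigned long));
        return c;
}

static void cfg_free(struct cfg *c)
{
        free(c->domain);        /* must free the mask, not just the struct */
        free(c);
}

int main(void)
{
        struct cfg *old = cfg_alloc(), *new = cfg_alloc();

        /*
         * Wrong: memcpy(new, old, sizeof(*new)) would make new->domain
         * alias old->domain and leak the mask new already owned.
         * Right: copy members, duplicating the pointed-to data.
         */
        new->vector = old->vector;
        memcpy(new->domain, old->domain, sizeof(unsigned long));

        cfg_free(old);
        cfg_free(new);
        return 0;
}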
@@ -545,7 +545,7 @@ void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
        }
}

static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
{
        u32 tfms, xlvl;
        u32 ebx;
@@ -33,5 +33,6 @@ extern const struct cpu_dev *const __x86_cpu_dev_start[],
                            *const __x86_cpu_dev_end[];

extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
extern void get_cpu_cap(struct cpuinfo_x86 *c);

#endif
@@ -39,6 +39,7 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
                misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
                wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
                c->cpuid_level = cpuid_eax(0);
                get_cpu_cap(c);
        }
}
@@ -102,6 +102,7 @@ struct cpu_hw_events {
         */
        struct perf_event       *events[X86_PMC_IDX_MAX]; /* in counter order */
        unsigned long           active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
        unsigned long           running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
        int                     enabled;

        int                     n_events;
@@ -1010,6 +1011,7 @@ static int x86_pmu_start(struct perf_event *event)
        x86_perf_event_set_period(event);
        cpuc->events[idx] = event;
        __set_bit(idx, cpuc->active_mask);
        __set_bit(idx, cpuc->running);
        x86_pmu.enable(event);
        perf_event_update_userpage(event);
@@ -1141,8 +1143,16 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
        cpuc = &__get_cpu_var(cpu_hw_events);

        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                if (!test_bit(idx, cpuc->active_mask))
                if (!test_bit(idx, cpuc->active_mask)) {
                        /*
                         * Though we deactivated the counter some cpus
                         * might still deliver spurious interrupts still
                         * in flight. Catch them:
                         */
                        if (__test_and_clear_bit(idx, cpuc->running))
                                handled++;
                        continue;
                }

                event = cpuc->events[idx];
                hwc = &event->hw;
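The new running bitmap lets the NMI handler distinguish a counter that was recently active, and may still have an interrupt in flight, from one that never was: the bit is set in x86_pmu_start() above and only cleared here, so the first stray NMI after a counter is disabled counts as handled instead of surfacing as an unknown NMI. The pattern in isolation (plain word-sized bit operations standing in for __set_bit/__test_and_clear_bit):

#include <stdio.h>

static unsigned long active_mask, running;

static void counter_start(int idx)
{
        active_mask |= 1UL << idx;
        running     |= 1UL << idx;      /* stays set until a later IRQ clears it */
}

static void counter_stop(int idx)
{
        active_mask &= ~(1UL << idx);   /* note: 'running' is left alone */
}

/* Returns 1 if the interrupt for idx was expected or recognisably stale. */
static int handle_irq(int idx)
{
        if (!(active_mask & (1UL << idx))) {
                if (running & (1UL << idx)) {
                        running &= ~(1UL << idx);
                        return 1;       /* spurious but explainable: swallow it */
                }
                return 0;               /* truly unknown source */
        }
        return 1;                       /* normal case: counter is active */
}

int main(void)
{
        counter_start(2);
        counter_stop(2);
        printf("%d %d\n", handle_irq(2), handle_irq(2));        /* 1 then 0 */
        return 0;
}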
@@ -31,6 +31,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
        const struct cpuid_bit *cb;

        static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
                { X86_FEATURE_DTS,      CR_EAX, 0, 0x00000006, 0 },
                { X86_FEATURE_IDA,      CR_EAX, 1, 0x00000006, 0 },
                { X86_FEATURE_ARAT,     CR_EAX, 2, 0x00000006, 0 },
                { X86_FEATURE_PLN,      CR_EAX, 4, 0x00000006, 0 },
@@ -506,7 +506,7 @@ static int hpet_assign_irq(struct hpet_dev *dev)
{
        unsigned int irq;

        irq = create_irq();
        irq = create_irq_nr(0, -1);
        if (!irq)
                return -EINVAL;
@@ -206,11 +206,27 @@ int arch_check_bp_in_kernelspace(struct perf_event *bp)
int arch_bp_generic_fields(int x86_len, int x86_type,
                           int *gen_len, int *gen_type)
{
        /* Type */
        switch (x86_type) {
        case X86_BREAKPOINT_EXECUTE:
                if (x86_len != X86_BREAKPOINT_LEN_X)
                        return -EINVAL;

                *gen_type = HW_BREAKPOINT_X;
                *gen_len = sizeof(long);
                return 0;
        case X86_BREAKPOINT_WRITE:
                *gen_type = HW_BREAKPOINT_W;
                break;
        case X86_BREAKPOINT_RW:
                *gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
                break;
        default:
                return -EINVAL;
        }

        /* Len */
        switch (x86_len) {
        case X86_BREAKPOINT_LEN_X:
                *gen_len = sizeof(long);
                break;
        case X86_BREAKPOINT_LEN_1:
                *gen_len = HW_BREAKPOINT_LEN_1;
                break;
@@ -229,21 +245,6 @@ int arch_bp_generic_fields(int x86_len, int x86_type,
                return -EINVAL;
        }

        /* Type */
        switch (x86_type) {
        case X86_BREAKPOINT_EXECUTE:
                *gen_type = HW_BREAKPOINT_X;
                break;
        case X86_BREAKPOINT_WRITE:
                *gen_type = HW_BREAKPOINT_W;
                break;
        case X86_BREAKPOINT_RW:
                *gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}
@@ -316,9 +317,6 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
        ret = -EINVAL;

        switch (info->len) {
        case X86_BREAKPOINT_LEN_X:
                align = sizeof(long) - 1;
                break;
        case X86_BREAKPOINT_LEN_1:
                align = 0;
                break;
@@ -324,9 +324,8 @@ static void lguest_load_gdt(const struct desc_ptr *desc)
}

/*
 * For a single GDT entry which changes, we do the lazy thing: alter our GDT,
 * then tell the Host to reload the entire thing. This operation is so rare
 * that this naive implementation is reasonable.
 * For a single GDT entry which changes, we simply change our copy and
 * then tell the host about it.
 */
static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum,
                                   const void *desc, int type)
@@ -338,9 +337,13 @@ static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum,
}

/*
 * OK, I lied. There are three "thread local storage" GDT entries which change
 * There are three "thread local storage" GDT entries which change
 * on every context switch (these three entries are how glibc implements
 * __thread variables). So we have a hypercall specifically for this case.
 * __thread variables). As an optimization, we have a hypercall
 * specifically for this case.
 *
 * Wouldn't it be nicer to have a general LOAD_GDT_ENTRIES hypercall
 * which took a range of entries?
 */
static void lguest_load_tls(struct thread_struct *t, unsigned int cpu)
{
@@ -671,7 +671,9 @@ static int __init ppro_init(char **cpu_type)
        case 14:
                *cpu_type = "i386/core";
                break;
        case 15: case 23:
        case 0x0f:
        case 0x16:
        case 0x17:
                *cpu_type = "i386/core_2";
                break;
        case 0x1a: