Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, asm: Clean up desc.h a bit
  x86, amd: Do not enable ARAT feature on AMD processors below family 0x12
  x86: Move do_page_fault()'s error path under unlikely()
  x86, efi: Retain boot service code until after switching to virtual mode
  x86: Remove unnecessary check in detect_ht()
  x86: Reorder mm_context_t to remove x86_64 alignment padding and thus shrink mm_struct
  x86, UV: Clean up uv_tlb.c
  x86, UV: Add support for SGI UV2 hub chip
  x86, cpufeature: Update CPU feature RDRND to RDRAND
@@ -125,7 +125,7 @@
#define X86_FEATURE_OSXSAVE (4*32+27) /* "" XSAVE enabled in the OS */
#define X86_FEATURE_AVX (4*32+28) /* Advanced Vector Extensions */
#define X86_FEATURE_F16C (4*32+29) /* 16-bit fp conversions */
-#define X86_FEATURE_RDRND (4*32+30) /* The RDRAND instruction */
+#define X86_FEATURE_RDRAND (4*32+30) /* The RDRAND instruction */
#define X86_FEATURE_HYPERVISOR (4*32+31) /* Running on a hypervisor */

/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
@@ -4,17 +4,19 @@
#include <asm/desc_defs.h>
#include <asm/ldt.h>
#include <asm/mmu.h>

#include <linux/smp.h>

-static inline void fill_ldt(struct desc_struct *desc,
-const struct user_desc *info)
+static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *info)
{
desc->limit0 = info->limit & 0x0ffff;
-desc->base0 = info->base_addr & 0x0000ffff;

+desc->base0 = (info->base_addr & 0x0000ffff);
desc->base1 = (info->base_addr & 0x00ff0000) >> 16;

desc->type = (info->read_exec_only ^ 1) << 1;
desc->type |= info->contents << 2;

desc->s = 1;
desc->dpl = 0x3;
desc->p = info->seg_not_present ^ 1;
@@ -22,6 +24,7 @@ static inline void fill_ldt(struct desc_struct *desc,
desc->avl = info->useable;
desc->d = info->seg_32bit;
desc->g = info->limit_in_pages;

desc->base2 = (info->base_addr & 0xff000000) >> 24;
/*
* Don't allow setting of the lm bit. It is useless anyway
@@ -36,6 +39,7 @@ extern gate_desc idt_table[];
struct gdt_page {
struct desc_struct gdt[GDT_ENTRIES];
} __attribute__((aligned(PAGE_SIZE)));

DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);

static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
@@ -66,8 +70,7 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
unsigned short seg)
{
gate->a = (seg << 16) | (base & 0xffff);
-gate->b = (base & 0xffff0000) |
-(((0x80 | type | (dpl << 5)) & 0xff) << 8);
+gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
}

#endif
@@ -75,6 +78,7 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
static inline int desc_empty(const void *ptr)
{
const u32 *desc = ptr;

return !(desc[0] | desc[1]);
}

@@ -94,12 +98,9 @@ static inline int desc_empty(const void *ptr)
#define load_TLS(t, cpu) native_load_tls(t, cpu)
#define set_ldt native_set_ldt

-#define write_ldt_entry(dt, entry, desc) \
-native_write_ldt_entry(dt, entry, desc)
-#define write_gdt_entry(dt, entry, desc, type) \
-native_write_gdt_entry(dt, entry, desc, type)
-#define write_idt_entry(dt, entry, g) \
-native_write_idt_entry(dt, entry, g)
+#define write_ldt_entry(dt, entry, desc) native_write_ldt_entry(dt, entry, desc)
+#define write_gdt_entry(dt, entry, desc, type) native_write_gdt_entry(dt, entry, desc, type)
+#define write_idt_entry(dt, entry, g) native_write_idt_entry(dt, entry, g)

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
@@ -112,33 +113,27 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)

#define store_ldt(ldt) asm("sldt %0" : "=m"(ldt))

-static inline void native_write_idt_entry(gate_desc *idt, int entry,
-const gate_desc *gate)
+static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
{
memcpy(&idt[entry], gate, sizeof(*gate));
}

-static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
-const void *desc)
+static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
{
memcpy(&ldt[entry], desc, 8);
}

-static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
-const void *desc, int type)
+static inline void
+native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int type)
{
unsigned int size;

switch (type) {
-case DESC_TSS:
-size = sizeof(tss_desc);
-break;
-case DESC_LDT:
-size = sizeof(ldt_desc);
-break;
-default:
-size = sizeof(struct desc_struct);
-break;
+case DESC_TSS: size = sizeof(tss_desc); break;
+case DESC_LDT: size = sizeof(ldt_desc); break;
+default: size = sizeof(*gdt); break;
}

memcpy(&gdt[entry], desc, size);
}

@@ -154,12 +149,13 @@ static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
}


-static inline void set_tssldt_descriptor(void *d, unsigned long addr,
-unsigned type, unsigned size)
+static inline void set_tssldt_descriptor(void *d, unsigned long addr, unsigned type, unsigned size)
{
#ifdef CONFIG_X86_64
struct ldttss_desc64 *desc = d;

memset(desc, 0, sizeof(*desc));

desc->limit0 = size & 0xFFFF;
desc->base0 = PTR_LOW(addr);
desc->base1 = PTR_MIDDLE(addr) & 0xFF;
@@ -237,14 +233,16 @@ static inline void native_store_idt(struct desc_ptr *dtr)
static inline unsigned long native_store_tr(void)
{
unsigned long tr;

asm volatile("str %0":"=r" (tr));

return tr;
}

static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
{
-unsigned int i;
struct desc_struct *gdt = get_cpu_gdt_table(cpu);
+unsigned int i;

for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
@@ -313,6 +311,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
unsigned dpl, unsigned ist, unsigned seg)
{
gate_desc s;

pack_gate(&s, type, (unsigned long)addr, dpl, ist, seg);
/*
* does not need to be atomic because it is only done once at
@@ -343,9 +342,10 @@ static inline void alloc_system_vector(int vector)
set_bit(vector, used_vectors);
if (first_system_vector > vector)
first_system_vector = vector;
-} else
+} else {
BUG();
}
+}

static inline void alloc_intr_gate(unsigned int n, void *addr)
{
@@ -11,14 +11,14 @@
typedef struct {
void *ldt;
int size;
-struct mutex lock;
-void *vdso;

#ifdef CONFIG_X86_64
/* True if mm supports a task running in 32 bit compatibility mode. */
unsigned short ia32_compat;
#endif

+struct mutex lock;
+void *vdso;
} mm_context_t;

#ifdef CONFIG_SMP
@@ -5,7 +5,7 @@
*
* SGI UV Broadcast Assist Unit definitions
*
-* Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
+* Copyright (C) 2008-2011 Silicon Graphics, Inc. All rights reserved.
*/

#ifndef _ASM_X86_UV_UV_BAU_H
@@ -35,17 +35,20 @@

#define MAX_CPUS_PER_UVHUB 64
#define MAX_CPUS_PER_SOCKET 32
-#define UV_ADP_SIZE 64 /* hardware-provided max. */
-#define UV_CPUS_PER_ACT_STATUS 32 /* hardware-provided max. */
-#define UV_ITEMS_PER_DESCRIPTOR 8
+#define ADP_SZ 64 /* hardware-provided max. */
+#define UV_CPUS_PER_AS 32 /* hardware-provided max. */
+#define ITEMS_PER_DESC 8
/* the 'throttle' to prevent the hardware stay-busy bug */
#define MAX_BAU_CONCURRENT 3
#define UV_ACT_STATUS_MASK 0x3
#define UV_ACT_STATUS_SIZE 2
#define UV_DISTRIBUTION_SIZE 256
#define UV_SW_ACK_NPENDING 8
-#define UV_NET_ENDPOINT_INTD 0x38
-#define UV_DESC_BASE_PNODE_SHIFT 49
+#define UV1_NET_ENDPOINT_INTD 0x38
+#define UV2_NET_ENDPOINT_INTD 0x28
+#define UV_NET_ENDPOINT_INTD (is_uv1_hub() ? \
+UV1_NET_ENDPOINT_INTD : UV2_NET_ENDPOINT_INTD)
+#define UV_DESC_PSHIFT 49
#define UV_PAYLOADQ_PNODE_SHIFT 49
#define UV_PTC_BASENAME "sgi_uv/ptc_statistics"
#define UV_BAU_BASENAME "sgi_uv/bau_tunables"
@@ -53,10 +56,23 @@
#define UV_BAU_TUNABLES_FILE "bau_tunables"
#define WHITESPACE " \t\n"
#define uv_physnodeaddr(x) ((__pa((unsigned long)(x)) & uv_mmask))
-#define UV_ENABLE_INTD_SOFT_ACK_MODE_SHIFT 15
-#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHIFT 16
-#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD 0x0000000009UL
+#define cpubit_isset(cpu, bau_local_cpumask) \
+test_bit((cpu), (bau_local_cpumask).bits)
/* [19:16] SOFT_ACK timeout period 19: 1 is urgency 7 17:16 1 is multiplier */
+/*
+* UV2: Bit 19 selects between
+* (0): 10 microsecond timebase and
+* (1): 80 microseconds
+* we're using 655us, similar to UV1: 65 units of 10us
+*/
+#define UV1_INTD_SOFT_ACK_TIMEOUT_PERIOD (9UL)
+#define UV2_INTD_SOFT_ACK_TIMEOUT_PERIOD (65*10UL)

+#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD (is_uv1_hub() ? \
+UV1_INTD_SOFT_ACK_TIMEOUT_PERIOD : \
+UV2_INTD_SOFT_ACK_TIMEOUT_PERIOD)

#define BAU_MISC_CONTROL_MULT_MASK 3

#define UVH_AGING_PRESCALE_SEL 0x000000b000UL
@@ -69,13 +85,35 @@
#define BAU_TRANS_SHIFT 40
#define BAU_TRANS_MASK 0x3f

+/*
+* shorten some awkward names
+*/
+#define AS_PUSH_SHIFT UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT
+#define SOFTACK_MSHIFT UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT
+#define SOFTACK_PSHIFT UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT
+#define SOFTACK_TIMEOUT_PERIOD UV_INTD_SOFT_ACK_TIMEOUT_PERIOD
+#define write_gmmr uv_write_global_mmr64
+#define write_lmmr uv_write_local_mmr
+#define read_lmmr uv_read_local_mmr
+#define read_gmmr uv_read_global_mmr64

/*
* bits in UVH_LB_BAU_SB_ACTIVATION_STATUS_0/1
*/
-#define DESC_STATUS_IDLE 0
-#define DESC_STATUS_ACTIVE 1
-#define DESC_STATUS_DESTINATION_TIMEOUT 2
-#define DESC_STATUS_SOURCE_TIMEOUT 3
+#define DS_IDLE 0
+#define DS_ACTIVE 1
+#define DS_DESTINATION_TIMEOUT 2
+#define DS_SOURCE_TIMEOUT 3
+/*
+* bits put together from HRP_LB_BAU_SB_ACTIVATION_STATUS_0/1/2
+* values 1 and 5 will not occur
+*/
+#define UV2H_DESC_IDLE 0
+#define UV2H_DESC_DEST_TIMEOUT 2
+#define UV2H_DESC_DEST_STRONG_NACK 3
+#define UV2H_DESC_BUSY 4
+#define UV2H_DESC_SOURCE_TIMEOUT 6
+#define UV2H_DESC_DEST_PUT_ERR 7

/*
* delay for 'plugged' timeout retries, in microseconds
@@ -96,6 +134,15 @@

#define UV_LB_SUBNODEID 0x10

+/* these two are the same for UV1 and UV2: */
+#define UV_SA_SHFT UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT
+#define UV_SA_MASK UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK
+/* 4 bits of software ack period */
+#define UV2_ACK_MASK 0x7UL
+#define UV2_ACK_UNITS_SHFT 3
+#define UV2_LEG_SHFT UV2H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT
+#define UV2_EXT_SHFT UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT

/*
* number of entries in the destination side payload queue
*/
@@ -115,9 +162,16 @@
/*
* tuning the action when the numalink network is extremely delayed
*/
-#define CONGESTED_RESPONSE_US 1000 /* 'long' response time, in microseconds */
-#define CONGESTED_REPS 10 /* long delays averaged over this many broadcasts */
-#define CONGESTED_PERIOD 30 /* time for the bau to be disabled, in seconds */
+#define CONGESTED_RESPONSE_US 1000 /* 'long' response time, in
+microseconds */
+#define CONGESTED_REPS 10 /* long delays averaged over
+this many broadcasts */
+#define CONGESTED_PERIOD 30 /* time for the bau to be
+disabled, in seconds */
+/* see msg_type: */
+#define MSG_NOOP 0
+#define MSG_REGULAR 1
+#define MSG_RETRY 2

/*
* Distribution: 32 bytes (256 bits) (bytes 0-0x1f of descriptor)
@@ -129,7 +183,7 @@
* 'base_dest_nasid' field of the header corresponds to the
* destination nodeID associated with that specified bit.
*/
-struct bau_target_uvhubmask {
+struct bau_targ_hubmask {
unsigned long bits[BITS_TO_LONGS(UV_DISTRIBUTION_SIZE)];
};

@@ -160,8 +214,8 @@ struct bau_local_cpumask {
* The payload is software-defined for INTD transactions
*/
struct bau_msg_payload {
-unsigned long address; /* signifies a page or all TLB's
-of the cpu */
+unsigned long address; /* signifies a page or all
+TLB's of the cpu */
/* 64 bits */
unsigned short sending_cpu; /* filled in by sender */
/* 16 bits */
@@ -178,8 +232,8 @@ struct bau_msg_payload {
struct bau_msg_header {
unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */
/* bits 5:0 */
-unsigned int base_dest_nasid:15; /* nasid of the */
-/* bits 20:6 */ /* first bit in uvhub map */
+unsigned int base_dest_nasid:15; /* nasid of the first bit */
+/* bits 20:6 */ /* in uvhub map */
unsigned int command:8; /* message type */
/* bits 28:21 */
/* 0x38: SN3net EndPoint Message */
@@ -191,20 +245,24 @@ struct bau_msg_header {
/* Suppl_A is 56-41 */
unsigned int sequence:16; /* message sequence number */
/* bits 56:41 */ /* becomes bytes 16-17 of msg */
-/* Address field (96:57) is never used as an
-address (these are address bits 42:3) */
+/* Address field (96:57) is
+never used as an address
+(these are address bits
+42:3) */

unsigned int rsvd_3:1; /* must be zero */
/* bit 57 */
/* address bits 27:4 are payload */
/* these next 24 (58-81) bits become bytes 12-14 of msg */

/* bits 65:58 land in byte 12 */
-unsigned int replied_to:1;/* sent as 0 by the source to byte 12 */
+unsigned int replied_to:1; /* sent as 0 by the source to
+byte 12 */
/* bit 58 */
-unsigned int msg_type:3; /* software type of the message*/
+unsigned int msg_type:3; /* software type of the
+message */
/* bits 61:59 */
-unsigned int canceled:1; /* message canceled, resource to be freed*/
+unsigned int canceled:1; /* message canceled, resource
+is to be freed*/
/* bit 62 */
unsigned int payload_1a:1; /* not currently used */
/* bit 63 */
@@ -225,39 +283,38 @@ struct bau_msg_header {

unsigned int rsvd_4:7; /* must be zero */
/* bits 88:82 */
-unsigned int sw_ack_flag:1;/* software acknowledge flag */
+unsigned int swack_flag:1; /* software acknowledge flag */
/* bit 89 */
-/* INTD trasactions at destination are to
-wait for software acknowledge */
+/* INTD trasactions at
+destination are to wait for
+software acknowledge */
unsigned int rsvd_5:6; /* must be zero */
/* bits 95:90 */
unsigned int rsvd_6:5; /* must be zero */
/* bits 100:96 */
-unsigned int int_both:1;/* if 1, interrupt both sockets on the uvhub */
+unsigned int int_both:1; /* if 1, interrupt both sockets
+on the uvhub */
/* bit 101*/
unsigned int fairness:3; /* usually zero */
/* bits 104:102 */
-unsigned int multilevel:1; /* multi-level multicast format */
+unsigned int multilevel:1; /* multi-level multicast
+format */
/* bit 105 */
/* 0 for TLB: endpoint multi-unicast messages */
-unsigned int chaining:1;/* next descriptor is part of this activation*/
+unsigned int chaining:1; /* next descriptor is part of
+this activation*/
/* bit 106 */
unsigned int rsvd_7:21; /* must be zero */
/* bits 127:107 */
};

-/* see msg_type: */
-#define MSG_NOOP 0
-#define MSG_REGULAR 1
-#define MSG_RETRY 2

/*
* The activation descriptor:
* The format of the message to send, plus all accompanying control
* Should be 64 bytes
*/
struct bau_desc {
-struct bau_target_uvhubmask distribution;
+struct bau_targ_hubmask distribution;
/*
* message template, consisting of header and payload:
*/
@@ -281,55 +338,47 @@ struct bau_desc {
* are 32 bytes (2 micropackets) (256 bits) in length, but contain only 17
* bytes of usable data, including the sw ack vector in byte 15 (bits 127:120)
* (12 bytes come from bau_msg_payload, 3 from payload_1, 2 from
-* sw_ack_vector and payload_2)
+* swack_vec and payload_2)
* "Enabling Software Acknowledgment mode (see Section 4.3.3 Software
* Acknowledge Processing) also selects 32 byte (17 bytes usable) payload
* operation."
*/
-struct bau_payload_queue_entry {
+struct bau_pq_entry {
unsigned long address; /* signifies a page or all TLB's
of the cpu */
/* 64 bits, bytes 0-7 */

unsigned short sending_cpu; /* cpu that sent the message */
/* 16 bits, bytes 8-9 */

unsigned short acknowledge_count; /* filled in by destination */
/* 16 bits, bytes 10-11 */

/* these next 3 bytes come from bits 58-81 of the message header */
unsigned short replied_to:1; /* sent as 0 by the source */
unsigned short msg_type:3; /* software message type */
unsigned short canceled:1; /* sent as 0 by the source */
unsigned short unused1:3; /* not currently using */
/* byte 12 */

unsigned char unused2a; /* not currently using */
/* byte 13 */
unsigned char unused2; /* not currently using */
/* byte 14 */
-unsigned char sw_ack_vector; /* filled in by the hardware */
+unsigned char swack_vec; /* filled in by the hardware */
/* byte 15 (bits 127:120) */

unsigned short sequence; /* message sequence number */
/* bytes 16-17 */
unsigned char unused4[2]; /* not currently using bytes 18-19 */
/* bytes 18-19 */

int number_of_cpus; /* filled in at destination */
/* 32 bits, bytes 20-23 (aligned) */

unsigned char unused5[8]; /* not using */
/* bytes 24-31 */
};

struct msg_desc {
-struct bau_payload_queue_entry *msg;
+struct bau_pq_entry *msg;
int msg_slot;
-int sw_ack_slot;
-struct bau_payload_queue_entry *va_queue_first;
-struct bau_payload_queue_entry *va_queue_last;
+int swack_slot;
+struct bau_pq_entry *queue_first;
+struct bau_pq_entry *queue_last;
};

struct reset_args {
@@ -341,57 +390,101 @@ struct reset_args {
*/
struct ptc_stats {
/* sender statistics */
-unsigned long s_giveup; /* number of fall backs to IPI-style flushes */
-unsigned long s_requestor; /* number of shootdown requests */
+unsigned long s_giveup; /* number of fall backs to
+IPI-style flushes */
+unsigned long s_requestor; /* number of shootdown
+requests */
unsigned long s_stimeout; /* source side timeouts */
unsigned long s_dtimeout; /* destination side timeouts */
unsigned long s_time; /* time spent in sending side */
unsigned long s_retriesok; /* successful retries */
-unsigned long s_ntargcpu; /* total number of cpu's targeted */
-unsigned long s_ntargself; /* times the sending cpu was targeted */
-unsigned long s_ntarglocals; /* targets of cpus on the local blade */
-unsigned long s_ntargremotes; /* targets of cpus on remote blades */
+unsigned long s_ntargcpu; /* total number of cpu's
+targeted */
+unsigned long s_ntargself; /* times the sending cpu was
+targeted */
+unsigned long s_ntarglocals; /* targets of cpus on the local
+blade */
+unsigned long s_ntargremotes; /* targets of cpus on remote
+blades */
unsigned long s_ntarglocaluvhub; /* targets of the local hub */
unsigned long s_ntargremoteuvhub; /* remotes hubs targeted */
-unsigned long s_ntarguvhub; /* total number of uvhubs targeted */
-unsigned long s_ntarguvhub16; /* number of times target hubs >= 16*/
-unsigned long s_ntarguvhub8; /* number of times target hubs >= 8 */
-unsigned long s_ntarguvhub4; /* number of times target hubs >= 4 */
-unsigned long s_ntarguvhub2; /* number of times target hubs >= 2 */
-unsigned long s_ntarguvhub1; /* number of times target hubs == 1 */
-unsigned long s_resets_plug; /* ipi-style resets from plug state */
-unsigned long s_resets_timeout; /* ipi-style resets from timeouts */
-unsigned long s_busy; /* status stayed busy past s/w timer */
+unsigned long s_ntarguvhub; /* total number of uvhubs
+targeted */
+unsigned long s_ntarguvhub16; /* number of times target
+hubs >= 16*/
+unsigned long s_ntarguvhub8; /* number of times target
+hubs >= 8 */
+unsigned long s_ntarguvhub4; /* number of times target
+hubs >= 4 */
+unsigned long s_ntarguvhub2; /* number of times target
+hubs >= 2 */
+unsigned long s_ntarguvhub1; /* number of times target
+hubs == 1 */
+unsigned long s_resets_plug; /* ipi-style resets from plug
+state */
+unsigned long s_resets_timeout; /* ipi-style resets from
+timeouts */
+unsigned long s_busy; /* status stayed busy past
+s/w timer */
unsigned long s_throttles; /* waits in throttle */
unsigned long s_retry_messages; /* retry broadcasts */
unsigned long s_bau_reenabled; /* for bau enable/disable */
unsigned long s_bau_disabled; /* for bau enable/disable */
/* destination statistics */
-unsigned long d_alltlb; /* times all tlb's on this cpu were flushed */
-unsigned long d_onetlb; /* times just one tlb on this cpu was flushed */
-unsigned long d_multmsg; /* interrupts with multiple messages */
+unsigned long d_alltlb; /* times all tlb's on this
+cpu were flushed */
+unsigned long d_onetlb; /* times just one tlb on this
+cpu was flushed */
+unsigned long d_multmsg; /* interrupts with multiple
+messages */
unsigned long d_nomsg; /* interrupts with no message */
-unsigned long d_time; /* time spent on destination side */
-unsigned long d_requestee; /* number of messages processed */
-unsigned long d_retries; /* number of retry messages processed */
-unsigned long d_canceled; /* number of messages canceled by retries */
-unsigned long d_nocanceled; /* retries that found nothing to cancel */
-unsigned long d_resets; /* number of ipi-style requests processed */
-unsigned long d_rcanceled; /* number of messages canceled by resets */
+unsigned long d_time; /* time spent on destination
+side */
+unsigned long d_requestee; /* number of messages
+processed */
+unsigned long d_retries; /* number of retry messages
+processed */
+unsigned long d_canceled; /* number of messages canceled
+by retries */
+unsigned long d_nocanceled; /* retries that found nothing
+to cancel */
+unsigned long d_resets; /* number of ipi-style requests
+processed */
+unsigned long d_rcanceled; /* number of messages canceled
+by resets */
+};
+
+struct tunables {
+int *tunp;
+int deflt;
};

struct hub_and_pnode {
short uvhub;
short pnode;
};

+struct socket_desc {
+short num_cpus;
+short cpu_number[MAX_CPUS_PER_SOCKET];
+};
+
+struct uvhub_desc {
+unsigned short socket_mask;
+short num_cpus;
+short uvhub;
+short pnode;
+struct socket_desc socket[2];
+};
+
/*
* one per-cpu; to locate the software tables
*/
struct bau_control {
struct bau_desc *descriptor_base;
-struct bau_payload_queue_entry *va_queue_first;
-struct bau_payload_queue_entry *va_queue_last;
-struct bau_payload_queue_entry *bau_msg_head;
+struct bau_pq_entry *queue_first;
+struct bau_pq_entry *queue_last;
+struct bau_pq_entry *bau_msg_head;
struct bau_control *uvhub_master;
struct bau_control *socket_master;
struct ptc_stats *statp;
@@ -418,35 +511,105 @@ struct bau_control {
spinlock_t uvhub_lock;
spinlock_t queue_lock;
/* tunables */
-int max_bau_concurrent;
-int max_bau_concurrent_constant;
+int max_concurr;
+int max_concurr_const;
int plugged_delay;
int plugsb4reset;
int timeoutsb4reset;
int ipi_reset_limit;
int complete_threshold;
-int congested_response_us;
-int congested_reps;
-int congested_period;
+int cong_response_us;
+int cong_reps;
+int cong_period;
cycles_t period_time;
long period_requests;
-struct hub_and_pnode *target_hub_and_pnode;
+struct hub_and_pnode *thp;
};

-static inline int bau_uvhub_isset(int uvhub, struct bau_target_uvhubmask *dstp)
+static unsigned long read_mmr_uv2_status(void)
+{
+return read_lmmr(UV2H_LB_BAU_SB_ACTIVATION_STATUS_2);
+}
+
+static void write_mmr_data_broadcast(int pnode, unsigned long mmr_image)
+{
+write_gmmr(pnode, UVH_BAU_DATA_BROADCAST, mmr_image);
+}
+
+static void write_mmr_descriptor_base(int pnode, unsigned long mmr_image)
+{
+write_gmmr(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE, mmr_image);
+}
+
+static void write_mmr_activation(unsigned long index)
+{
+write_lmmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index);
+}
+
+static void write_gmmr_activation(int pnode, unsigned long mmr_image)
+{
+write_gmmr(pnode, UVH_LB_BAU_SB_ACTIVATION_CONTROL, mmr_image);
+}
+
+static void write_mmr_payload_first(int pnode, unsigned long mmr_image)
+{
+write_gmmr(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST, mmr_image);
+}
+
+static void write_mmr_payload_tail(int pnode, unsigned long mmr_image)
+{
+write_gmmr(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL, mmr_image);
+}
+
+static void write_mmr_payload_last(int pnode, unsigned long mmr_image)
+{
+write_gmmr(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST, mmr_image);
+}
+
+static void write_mmr_misc_control(int pnode, unsigned long mmr_image)
+{
+write_gmmr(pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
+}
+
+static unsigned long read_mmr_misc_control(int pnode)
+{
+return read_gmmr(pnode, UVH_LB_BAU_MISC_CONTROL);
+}
+
+static void write_mmr_sw_ack(unsigned long mr)
+{
+uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, mr);
+}
+
+static unsigned long read_mmr_sw_ack(void)
+{
+return read_lmmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
+}
+
+static unsigned long read_gmmr_sw_ack(int pnode)
+{
+return read_gmmr(pnode, UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
+}
+
+static void write_mmr_data_config(int pnode, unsigned long mr)
+{
+uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, mr);
+}
+
+static inline int bau_uvhub_isset(int uvhub, struct bau_targ_hubmask *dstp)
{
return constant_test_bit(uvhub, &dstp->bits[0]);
}
-static inline void bau_uvhub_set(int pnode, struct bau_target_uvhubmask *dstp)
+static inline void bau_uvhub_set(int pnode, struct bau_targ_hubmask *dstp)
{
__set_bit(pnode, &dstp->bits[0]);
}
-static inline void bau_uvhubs_clear(struct bau_target_uvhubmask *dstp,
+static inline void bau_uvhubs_clear(struct bau_targ_hubmask *dstp,
int nbits)
{
bitmap_zero(&dstp->bits[0], nbits);
}
-static inline int bau_uvhub_weight(struct bau_target_uvhubmask *dstp)
+static inline int bau_uvhub_weight(struct bau_targ_hubmask *dstp)
{
return bitmap_weight((unsigned long *)&dstp->bits[0],
UV_DISTRIBUTION_SIZE);
@@ -457,9 +620,6 @@ static inline void bau_cpubits_clear(struct bau_local_cpumask *dstp, int nbits)
bitmap_zero(&dstp->bits, nbits);
}

-#define cpubit_isset(cpu, bau_local_cpumask) \
-test_bit((cpu), (bau_local_cpumask).bits)

extern void uv_bau_message_intr1(void);
extern void uv_bau_timeout_intr1(void);

@@ -467,7 +627,7 @@ struct atomic_short {
short counter;
};

-/**
+/*
* atomic_read_short - read a short atomic variable
* @v: pointer of type atomic_short
*
@@ -478,14 +638,14 @@ static inline int atomic_read_short(const struct atomic_short *v)
return v->counter;
}

-/**
-* atomic_add_short_return - add and return a short int
+/*
+* atom_asr - add and return a short int
* @i: short value to add
* @v: pointer of type atomic_short
*
* Atomically adds @i to @v and returns @i + @v
*/
-static inline int atomic_add_short_return(short i, struct atomic_short *v)
+static inline int atom_asr(short i, struct atomic_short *v)
{
short __i = i;
asm volatile(LOCK_PREFIX "xaddw %0, %1"
@@ -494,4 +654,26 @@ static inline int atomic_add_short_return(short i, struct atomic_short *v)
return i + __i;
}

+/*
+* conditionally add 1 to *v, unless *v is >= u
+* return 0 if we cannot add 1 to *v because it is >= u
+* return 1 if we can add 1 to *v because it is < u
+* the add is atomic
+*
+* This is close to atomic_add_unless(), but this allows the 'u' value
+* to be lowered below the current 'v'. atomic_add_unless can only stop
+* on equal.
+*/
+static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u)
+{
+spin_lock(lock);
+if (atomic_read(v) >= u) {
+spin_unlock(lock);
+return 0;
+}
+atomic_inc(v);
+spin_unlock(lock);
+return 1;
+}
+
#endif /* _ASM_X86_UV_UV_BAU_H */
@@ -77,8 +77,9 @@
*
* 1111110000000000
* 5432109876543210
-* pppppppppplc0cch Nehalem-EX
-* ppppppppplcc0cch Westmere-EX
+* pppppppppplc0cch Nehalem-EX (12 bits in hdw reg)
+* ppppppppplcc0cch Westmere-EX (12 bits in hdw reg)
+* pppppppppppcccch SandyBridge (15 bits in hdw reg)
* sssssssssss
*
* p = pnode bits
@@ -87,7 +88,7 @@
* h = hyperthread
* s = bits that are in the SOCKET_ID CSR
*
-* Note: Processor only supports 12 bits in the APICID register. The ACPI
+* Note: Processor may support fewer bits in the APICID register. The ACPI
* tables hold all 16 bits. Software needs to be aware of this.
*
* Unless otherwise specified, all references to APICID refer to
@@ -138,6 +139,8 @@ struct uv_hub_info_s {
unsigned long global_mmr_base;
unsigned long gpa_mask;
unsigned int gnode_extra;
+unsigned char hub_revision;
+unsigned char apic_pnode_shift;
unsigned long gnode_upper;
unsigned long lowmem_remap_top;
unsigned long lowmem_remap_base;
@@ -149,13 +152,31 @@ struct uv_hub_info_s {
unsigned char m_val;
unsigned char n_val;
struct uv_scir_s scir;
-unsigned char apic_pnode_shift;
};

DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
#define uv_hub_info (&__get_cpu_var(__uv_hub_info))
#define uv_cpu_hub_info(cpu) (&per_cpu(__uv_hub_info, cpu))

+/*
+* Hub revisions less than UV2_HUB_REVISION_BASE are UV1 hubs. All UV2
+* hubs have revision numbers greater than or equal to UV2_HUB_REVISION_BASE.
+* This is a software convention - NOT the hardware revision numbers in
+* the hub chip.
+*/
+#define UV1_HUB_REVISION_BASE 1
+#define UV2_HUB_REVISION_BASE 3
+
+static inline int is_uv1_hub(void)
+{
+return uv_hub_info->hub_revision < UV2_HUB_REVISION_BASE;
+}
+
+static inline int is_uv2_hub(void)
+{
+return uv_hub_info->hub_revision >= UV2_HUB_REVISION_BASE;
+}
+
union uvh_apicid {
unsigned long v;
struct uvh_apicid_s {
@@ -180,11 +201,25 @@ union uvh_apicid {
#define UV_PNODE_TO_GNODE(p) ((p) |uv_hub_info->gnode_extra)
#define UV_PNODE_TO_NASID(p) (UV_PNODE_TO_GNODE(p) << 1)

-#define UV_LOCAL_MMR_BASE 0xf4000000UL
-#define UV_GLOBAL_MMR32_BASE 0xf8000000UL
+#define UV1_LOCAL_MMR_BASE 0xf4000000UL
+#define UV1_GLOBAL_MMR32_BASE 0xf8000000UL
+#define UV1_LOCAL_MMR_SIZE (64UL * 1024 * 1024)
+#define UV1_GLOBAL_MMR32_SIZE (64UL * 1024 * 1024)
+
+#define UV2_LOCAL_MMR_BASE 0xfa000000UL
+#define UV2_GLOBAL_MMR32_BASE 0xfc000000UL
+#define UV2_LOCAL_MMR_SIZE (32UL * 1024 * 1024)
+#define UV2_GLOBAL_MMR32_SIZE (32UL * 1024 * 1024)
+
+#define UV_LOCAL_MMR_BASE (is_uv1_hub() ? UV1_LOCAL_MMR_BASE \
+: UV2_LOCAL_MMR_BASE)
+#define UV_GLOBAL_MMR32_BASE (is_uv1_hub() ? UV1_GLOBAL_MMR32_BASE \
+: UV2_GLOBAL_MMR32_BASE)
+#define UV_LOCAL_MMR_SIZE (is_uv1_hub() ? UV1_LOCAL_MMR_SIZE : \
+UV2_LOCAL_MMR_SIZE)
+#define UV_GLOBAL_MMR32_SIZE (is_uv1_hub() ? UV1_GLOBAL_MMR32_SIZE :\
+UV2_GLOBAL_MMR32_SIZE)
#define UV_GLOBAL_MMR64_BASE (uv_hub_info->global_mmr_base)
-#define UV_LOCAL_MMR_SIZE (64UL * 1024 * 1024)
-#define UV_GLOBAL_MMR32_SIZE (64UL * 1024 * 1024)

#define UV_GLOBAL_GRU_MMR_BASE 0x4000000

@@ -300,6 +335,17 @@ static inline int uv_apicid_to_pnode(int apicid)
return (apicid >> uv_hub_info->apic_pnode_shift);
}

+/*
+* Convert an apicid to the socket number on the blade
+*/
+static inline int uv_apicid_to_socket(int apicid)
+{
+if (is_uv1_hub())
+return (apicid >> (uv_hub_info->apic_pnode_shift - 1)) & 1;
+else
+return 0;
+}
+
/*
* Access global MMRs using the low memory MMR32 space. This region supports
* faster MMR access but not all MMRs are accessible in this space.
@@ -519,14 +565,13 @@ static inline void uv_hub_send_ipi(int pnode, int apicid, int vector)

/*
* Get the minimum revision number of the hub chips within the partition.
-* 1 - initial rev 1.0 silicon
-* 2 - rev 2.0 production silicon
+* 1 - UV1 rev 1.0 initial silicon
+* 2 - UV1 rev 2.0 production silicon
+* 3 - UV2 rev 1.0 initial silicon
*/
static inline int uv_get_min_hub_revision_id(void)
{
-extern int uv_min_hub_revision_id;
-
-return uv_min_hub_revision_id;
+return uv_hub_info->hub_revision;
}

#endif /* CONFIG_X86_64 */
File diff suppressed because it is too large
@@ -91,6 +91,10 @@ static int __init early_get_pnodeid(void)
m_n_config.v = uv_early_read_mmr(UVH_RH_GAM_CONFIG_MMR);
uv_min_hub_revision_id = node_id.s.revision;

+if (node_id.s.part_number == UV2_HUB_PART_NUMBER)
+uv_min_hub_revision_id += UV2_HUB_REVISION_BASE - 1;
+
+uv_hub_info->hub_revision = uv_min_hub_revision_id;
pnode = (node_id.s.node_id >> 1) & ((1 << m_n_config.s.n_skt) - 1);
return pnode;
}
@@ -112,17 +116,25 @@ static void __init early_get_apic_pnode_shift(void)
*/
static void __init uv_set_apicid_hibit(void)
{
-union uvh_lb_target_physical_apic_id_mask_u apicid_mask;
+union uv1h_lb_target_physical_apic_id_mask_u apicid_mask;

-apicid_mask.v = uv_early_read_mmr(UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK);
-uv_apicid_hibits = apicid_mask.s.bit_enables & UV_APICID_HIBIT_MASK;
+if (is_uv1_hub()) {
+apicid_mask.v =
+uv_early_read_mmr(UV1H_LB_TARGET_PHYSICAL_APIC_ID_MASK);
+uv_apicid_hibits =
+apicid_mask.s1.bit_enables & UV_APICID_HIBIT_MASK;
+}
}

static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
-int pnodeid;
+int pnodeid, is_uv1, is_uv2;

-if (!strcmp(oem_id, "SGI")) {
+is_uv1 = !strcmp(oem_id, "SGI");
+is_uv2 = !strcmp(oem_id, "SGI2");
+if (is_uv1 || is_uv2) {
+uv_hub_info->hub_revision =
+is_uv1 ? UV1_HUB_REVISION_BASE : UV2_HUB_REVISION_BASE;
pnodeid = early_get_pnodeid();
early_get_apic_pnode_shift();
x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range;
@@ -484,13 +496,20 @@ static __init void map_mmr_high(int max_pnode)
static __init void map_mmioh_high(int max_pnode)
{
union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh;
-int shift = UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
+int shift;

mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR);
-if (mmioh.s.enable)
-map_high("MMIOH", mmioh.s.base, shift, mmioh.s.m_io,
+if (is_uv1_hub() && mmioh.s1.enable) {
+shift = UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
+map_high("MMIOH", mmioh.s1.base, shift, mmioh.s1.m_io,
max_pnode, map_uc);
}
+if (is_uv2_hub() && mmioh.s2.enable) {
+shift = UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
+map_high("MMIOH", mmioh.s2.base, shift, mmioh.s2.m_io,
+max_pnode, map_uc);
+}
+}

static __init void map_low_mmrs(void)
{
@@ -736,13 +755,14 @@ void __init uv_system_init(void)
unsigned long mmr_base, present, paddr;
unsigned short pnode_mask, pnode_io_mask;

+printk(KERN_INFO "UV: Found %s hub\n", is_uv1_hub() ? "UV1" : "UV2");
map_low_mmrs();

m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR );
m_val = m_n_config.s.m_skt;
n_val = m_n_config.s.n_skt;
mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR);
-n_io = mmioh.s.n_io;
+n_io = is_uv1_hub() ? mmioh.s1.n_io : mmioh.s2.n_io;
mmr_base =
uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) &
~UV_MMR_ENABLE;
@@ -811,6 +831,8 @@ void __init uv_system_init(void)
*/
uv_cpu_hub_info(cpu)->pnode_mask = pnode_mask;
uv_cpu_hub_info(cpu)->apic_pnode_shift = uvh_apicid.s.pnode_shift;
+uv_cpu_hub_info(cpu)->hub_revision = uv_hub_info->hub_revision;

pnode = uv_apicid_to_pnode(apicid);
blade = boot_pnode_to_blade(pnode);
lcpu = uv_blade_info[blade].nr_possible_cpus;
@@ -612,8 +612,11 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
}
#endif

-/* As a rule processors have APIC timer running in deep C states */
-if (c->x86 > 0xf && !cpu_has_amd_erratum(amd_erratum_400))
+/*
+* Family 0x12 and above processors have APIC timer
+* running in deep C states.
+*/
+if (c->x86 > 0x11)
set_cpu_cap(c, X86_FEATURE_ARAT);

/*
@@ -477,13 +477,6 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
if (smp_num_siblings <= 1)
goto out;

-if (smp_num_siblings > nr_cpu_ids) {
-pr_warning("CPU: Unsupported number of siblings %d",
-smp_num_siblings);
-smp_num_siblings = 1;
-return;
-}
-
index_msb = get_count_order(smp_num_siblings);
c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);

@@ -910,6 +910,13 @@ void __init setup_arch(char **cmdline_p)
memblock.current_limit = get_max_mapped();
memblock_x86_fill();

+/*
+* The EFI specification says that boot service code won't be called
+* after ExitBootServices(). This is, in fact, a lie.
+*/
+if (efi_enabled)
+efi_reserve_boot_services();
+
/* preallocate 4k for mptable mpc */
early_reserve_e820_mpc_new();

@@ -823,16 +823,30 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
force_sig_info_fault(SIGBUS, code, address, tsk, fault);
}

-static noinline void
+static noinline int
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
unsigned long address, unsigned int fault)
{
+/*
+* Pagefault was interrupted by SIGKILL. We have no reason to
+* continue pagefault.
+*/
+if (fatal_signal_pending(current)) {
+if (!(fault & VM_FAULT_RETRY))
+up_read(&current->mm->mmap_sem);
+if (!(error_code & PF_USER))
+no_context(regs, error_code, address);
+return 1;
+}
+if (!(fault & VM_FAULT_ERROR))
+return 0;

if (fault & VM_FAULT_OOM) {
/* Kernel mode? Handle exceptions or die: */
if (!(error_code & PF_USER)) {
up_read(&current->mm->mmap_sem);
no_context(regs, error_code, address);
-return;
+return 1;
}

out_of_memory(regs, error_code, address);
@@ -843,6 +857,7 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
|
|||||||
else
|
else
|
||||||
BUG();
|
BUG();
|
||||||
}
|
}
|
||||||
|
return 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int spurious_fault_check(unsigned long error_code, pte_t *pte)
|
static int spurious_fault_check(unsigned long error_code, pte_t *pte)
|
||||||
@@ -1133,18 +1148,8 @@ good_area:
|
|||||||
*/
|
*/
|
||||||
fault = handle_mm_fault(mm, vma, address, flags);
|
fault = handle_mm_fault(mm, vma, address, flags);
|
||||||
|
|
||||||
if (unlikely(fault & VM_FAULT_ERROR)) {
|
if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
|
||||||
mm_fault_error(regs, error_code, address, fault);
|
if (mm_fault_error(regs, error_code, address, fault))
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Pagefault was interrupted by SIGKILL. We have no reason to
|
|
||||||
* continue pagefault.
|
|
||||||
*/
|
|
||||||
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
|
|
||||||
if (!(error_code & PF_USER))
|
|
||||||
no_context(regs, error_code, address);
|
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
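
The net effect is that everything after handle_mm_fault() now sits behind a single unlikely() test, with mm_fault_error() returning non-zero once it has fully handled the error or retry case. unlikely() is only a code-layout hint; its well-known definition, for reference:

    /* As in include/linux/compiler.h: branch-prediction hints only, the
     * tested condition itself is unchanged. */
    #define likely(x)       __builtin_expect(!!(x), 1)
    #define unlikely(x)     __builtin_expect(!!(x), 0)
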
@@ -304,6 +304,40 @@ static void __init print_efi_memmap(void)
 }
 #endif  /*  EFI_DEBUG  */
 
+void __init efi_reserve_boot_services(void)
+{
+	void *p;
+
+	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+		efi_memory_desc_t *md = p;
+		unsigned long long start = md->phys_addr;
+		unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
+
+		if (md->type != EFI_BOOT_SERVICES_CODE &&
+		    md->type != EFI_BOOT_SERVICES_DATA)
+			continue;
+
+		memblock_x86_reserve_range(start, start + size, "EFI Boot");
+	}
+}
+
+static void __init efi_free_boot_services(void)
+{
+	void *p;
+
+	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+		efi_memory_desc_t *md = p;
+		unsigned long long start = md->phys_addr;
+		unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
+
+		if (md->type != EFI_BOOT_SERVICES_CODE &&
+		    md->type != EFI_BOOT_SERVICES_DATA)
+			continue;
+
+		free_bootmem_late(start, size);
+	}
+}
+
 void __init efi_init(void)
 {
 	efi_config_table_t *config_tables;
@@ -536,7 +570,9 @@ void __init efi_enter_virtual_mode(void)
 
 	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
 		md = p;
-		if (!(md->attribute & EFI_MEMORY_RUNTIME))
+		if (!(md->attribute & EFI_MEMORY_RUNTIME) &&
+		    md->type != EFI_BOOT_SERVICES_CODE &&
+		    md->type != EFI_BOOT_SERVICES_DATA)
 			continue;
 
 		size = md->num_pages << EFI_PAGE_SHIFT;
@@ -592,6 +628,13 @@ void __init efi_enter_virtual_mode(void)
 		panic("EFI call to SetVirtualAddressMap() failed!");
 	}
 
+	/*
+	 * Thankfully, it does seem that no runtime services other than
+	 * SetVirtualAddressMap() will touch boot services code, so we can
+	 * get rid of it all at this point
+	 */
+	efi_free_boot_services();
+
 	/*
 	 * Now that EFI is in virtual mode, update the function
 	 * pointers in the runtime service table to the new virtual addresses.
@@ -49,10 +49,11 @@ static void __init early_code_mapping_set_exec(int executable)
 	if (!(__supported_pte_mask & _PAGE_NX))
 		return;
 
-	/* Make EFI runtime service code area executable */
+	/* Make EFI service code area executable */
 	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
 		md = p;
-		if (md->type == EFI_RUNTIME_SERVICES_CODE)
+		if (md->type == EFI_RUNTIME_SERVICES_CODE ||
+		    md->type == EFI_BOOT_SERVICES_CODE)
 			efi_set_executable(md, executable);
 	}
 }
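
The EFI_BOOT_SERVICES_CODE/EFI_BOOT_SERVICES_DATA test now appears in three places (the reserve path, the free path, and the virtual-mode mapping loop). A hypothetical helper, not part of the series, that such a pattern could share; it assumes the efi_memory_desc_t layout and constants from <linux/efi.h> used above, and the name is made up:

    /* Regions that must stay reserved and mapped until after
     * SetVirtualAddressMap() has been called. */
    static bool is_efi_boot_services_region(const efi_memory_desc_t *md)
    {
            return md->type == EFI_BOOT_SERVICES_CODE ||
                   md->type == EFI_BOOT_SERVICES_DATA;
    }
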
File diff suppressed because it is too large
@@ -99,8 +99,12 @@ static void uv_rtc_send_IPI(int cpu)
 /* Check for an RTC interrupt pending */
 static int uv_intr_pending(int pnode)
 {
-	return uv_read_global_mmr64(pnode, UVH_EVENT_OCCURRED0) &
-		UVH_EVENT_OCCURRED0_RTC1_MASK;
+	if (is_uv1_hub())
+		return uv_read_global_mmr64(pnode, UVH_EVENT_OCCURRED0) &
+			UV1H_EVENT_OCCURRED0_RTC1_MASK;
+	else
+		return uv_read_global_mmr64(pnode, UV2H_EVENT_OCCURRED2) &
+			UV2H_EVENT_OCCURRED2_RTC_1_MASK;
 }
 
 /* Setup interrupt and return non-zero if early expiration occurred. */
@@ -114,8 +118,12 @@ static int uv_setup_intr(int cpu, u64 expires)
 		UVH_RTC1_INT_CONFIG_M_MASK);
 	uv_write_global_mmr64(pnode, UVH_INT_CMPB, -1L);
 
-	uv_write_global_mmr64(pnode, UVH_EVENT_OCCURRED0_ALIAS,
-		UVH_EVENT_OCCURRED0_RTC1_MASK);
+	if (is_uv1_hub())
+		uv_write_global_mmr64(pnode, UVH_EVENT_OCCURRED0_ALIAS,
+				UV1H_EVENT_OCCURRED0_RTC1_MASK);
+	else
+		uv_write_global_mmr64(pnode, UV2H_EVENT_OCCURRED2_ALIAS,
+				UV2H_EVENT_OCCURRED2_RTC_1_MASK);
 
 	val = (X86_PLATFORM_IPI_VECTOR << UVH_RTC1_INT_CONFIG_VECTOR_SHFT) |
 		((u64)apicid << UVH_RTC1_INT_CONFIG_APIC_ID_SHFT);
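
Both RTC paths now branch the same way: UV1 keeps the RTC1 bit in EVENT_OCCURRED0 while UV2 moves it to EVENT_OCCURRED2, and the *_ALIAS write above appears to clear the pending bit before the timer is re-armed. A hypothetical wrapper, not in the patch and with a made-up name, that would centralize that clear:

    /* Hypothetical helper mirroring the open-coded branches in
     * uv_setup_intr() above. */
    static void uv_clear_rtc1_pending(int pnode)
    {
            if (is_uv1_hub())
                    uv_write_global_mmr64(pnode, UVH_EVENT_OCCURRED0_ALIAS,
                                          UV1H_EVENT_OCCURRED0_RTC1_MASK);
            else
                    uv_write_global_mmr64(pnode, UV2H_EVENT_OCCURRED2_ALIAS,
                                          UV2H_EVENT_OCCURRED2_RTC_1_MASK);
    }
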
@@ -299,6 +299,7 @@ extern void efi_initialize_iomem_resources(struct resource *code_resource,
 				struct resource *data_resource, struct resource *bss_resource);
 extern unsigned long efi_get_time(void);
 extern int efi_set_rtc_mmss(unsigned long nowtime);
+extern void efi_reserve_boot_services(void);
 extern struct efi_memory_map memmap;
 
 /**