Merge commit 'v2.6.31-rc9' into tracing/core
Merge reason: move from -rc5 to -rc9.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
@@ -106,6 +106,9 @@ void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
 	unsigned long mask = cpumask_bits(cpumask)[0];
 	unsigned long flags;
 
+	if (WARN_ONCE(!mask, "empty IPI mask"))
+		return;
+
 	local_irq_save(flags);
 	WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
 	__default_send_IPI_dest_field(mask, vector, apic->dest_logical);
@@ -44,6 +44,11 @@ static struct apic *apic_probe[] __initdata = {
 	NULL,
 };
 
+static int apicid_phys_pkg_id(int initial_apic_id, int index_msb)
+{
+	return hard_smp_processor_id() >> index_msb;
+}
+
 /*
  * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode.
  */
@@ -69,6 +74,11 @@ void __init default_setup_apic_routing(void)
 		printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);
 	}
 
+	if (is_vsmp_box()) {
+		/* need to update phys_pkg_id */
+		apic->phys_pkg_id = apicid_phys_pkg_id;
+	}
+
 	/*
 	 * Now that apic routing model is selected, configure the
 	 * fault handling for intr remapping.
@@ -46,7 +46,7 @@ static int early_get_nodeid(void)
 	return node_id.s.node_id;
 }
 
-static int uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 {
 	if (!strcmp(oem_id, "SGI")) {
 		if (!strcmp(oem_table_id, "UVL"))
@@ -253,7 +253,7 @@ static void uv_send_IPI_self(int vector)
 	apic_write(APIC_SELF_IPI, vector);
 }
 
-struct apic apic_x2apic_uv_x = {
+struct apic __refdata apic_x2apic_uv_x = {
 
 	.name = "UV large system",
 	.probe = NULL,
@@ -7,6 +7,10 @@ ifdef CONFIG_FUNCTION_TRACER
 CFLAGS_REMOVE_common.o = -pg
 endif
 
+# Make sure load_percpu_segment has no stackprotector
+nostackp := $(call cc-option, -fno-stack-protector)
+CFLAGS_common.o := $(nostackp)
+
 obj-y := intel_cacheinfo.o addon_cpuid_features.o
 obj-y += proc.o capflags.o powerflags.o common.o
 obj-y += vmware.o hypervisor.o
@@ -400,6 +400,13 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 		level = cpuid_eax(1);
 		if((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
 			set_cpu_cap(c, X86_FEATURE_REP_GOOD);
+
+		/*
+		 * Some BIOSes incorrectly force this feature, but only K8
+		 * revision D (model = 0x14) and later actually support it.
+		 */
+		if (c->x86_model < 0x14)
+			clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
 	}
 	if (c->x86 == 0x10 || c->x86 == 0x11)
 		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
@@ -59,7 +59,30 @@ void __init setup_cpu_local_masks(void)
 	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
 }
 
-static const struct cpu_dev *this_cpu __cpuinitdata;
+static void __cpuinit default_init(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_X86_64
+	display_cacheinfo(c);
+#else
+	/* Not much we can do here... */
+	/* Check if at least it has cpuid */
+	if (c->cpuid_level == -1) {
+		/* No cpuid. It must be an ancient CPU */
+		if (c->x86 == 4)
+			strcpy(c->x86_model_id, "486");
+		else if (c->x86 == 3)
+			strcpy(c->x86_model_id, "386");
+	}
+#endif
+}
+
+static const struct cpu_dev __cpuinitconst default_cpu = {
+	.c_init = default_init,
+	.c_vendor = "Unknown",
+	.c_x86_vendor = X86_VENDOR_UNKNOWN,
+};
+
+static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
 
 DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 #ifdef CONFIG_X86_64
@@ -332,29 +355,6 @@ void switch_to_new_gdt(int cpu)
 
 static const struct cpu_dev *__cpuinitdata cpu_devs[X86_VENDOR_NUM] = {};
 
-static void __cpuinit default_init(struct cpuinfo_x86 *c)
-{
-#ifdef CONFIG_X86_64
-	display_cacheinfo(c);
-#else
-	/* Not much we can do here... */
-	/* Check if at least it has cpuid */
-	if (c->cpuid_level == -1) {
-		/* No cpuid. It must be an ancient CPU */
-		if (c->x86 == 4)
-			strcpy(c->x86_model_id, "486");
-		else if (c->x86 == 3)
-			strcpy(c->x86_model_id, "386");
-	}
-#endif
-}
-
-static const struct cpu_dev __cpuinitconst default_cpu = {
-	.c_init = default_init,
-	.c_vendor = "Unknown",
-	.c_x86_vendor = X86_VENDOR_UNKNOWN,
-};
-
 static void __cpuinit get_model_name(struct cpuinfo_x86 *c)
 {
 	unsigned int *v;
@@ -1226,8 +1226,13 @@ static void mce_init(void)
 }
 
 /* Add per CPU specific workarounds here */
-static void mce_cpu_quirks(struct cpuinfo_x86 *c)
+static int mce_cpu_quirks(struct cpuinfo_x86 *c)
 {
+	if (c->x86_vendor == X86_VENDOR_UNKNOWN) {
+		pr_info("MCE: unknown CPU type - not enabling MCE support.\n");
+		return -EOPNOTSUPP;
+	}
+
 	/* This should be disabled by the BIOS, but isn't always */
 	if (c->x86_vendor == X86_VENDOR_AMD) {
 		if (c->x86 == 15 && banks > 4) {
@@ -1273,11 +1278,20 @@ static void mce_cpu_quirks(struct cpuinfo_x86 *c)
 		if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) &&
 			monarch_timeout < 0)
 			monarch_timeout = USEC_PER_SEC;
+
+		/*
+		 * There are also broken BIOSes on some Pentium M and
+		 * earlier systems:
+		 */
+		if (c->x86 == 6 && c->x86_model <= 13 && mce_bootlog < 0)
+			mce_bootlog = 0;
 	}
 	if (monarch_timeout < 0)
 		monarch_timeout = 0;
 	if (mce_bootlog != 0)
 		mce_panic_timeout = 30;
+
+	return 0;
 }
 
 static void __cpuinit mce_ancient_init(struct cpuinfo_x86 *c)
@@ -1338,11 +1352,10 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
 	if (!mce_available(c))
 		return;
 
-	if (mce_cap_init() < 0) {
+	if (mce_cap_init() < 0 || mce_cpu_quirks(c) < 0) {
 		mce_disabled = 1;
 		return;
 	}
-	mce_cpu_quirks(c);
 
 	machine_check_vector = do_machine_check;
 
@@ -36,6 +36,7 @@
 
 static DEFINE_PER_CPU(__u64, next_check) = INITIAL_JIFFIES;
 static DEFINE_PER_CPU(unsigned long, thermal_throttle_count);
+static DEFINE_PER_CPU(bool, thermal_throttle_active);
 
 static atomic_t therm_throt_en = ATOMIC_INIT(0);
 
@@ -96,27 +97,33 @@ static int therm_throt_process(int curr)
 {
 	unsigned int cpu = smp_processor_id();
 	__u64 tmp_jiffs = get_jiffies_64();
+	bool was_throttled = __get_cpu_var(thermal_throttle_active);
+	bool is_throttled = __get_cpu_var(thermal_throttle_active) = curr;
 
-	if (curr)
+	if (is_throttled)
 		__get_cpu_var(thermal_throttle_count)++;
 
-	if (time_before64(tmp_jiffs, __get_cpu_var(next_check)))
+	if (!(was_throttled ^ is_throttled) &&
+	    time_before64(tmp_jiffs, __get_cpu_var(next_check)))
 		return 0;
 
 	__get_cpu_var(next_check) = tmp_jiffs + CHECK_INTERVAL;
 
 	/* if we just entered the thermal event */
-	if (curr) {
+	if (is_throttled) {
 		printk(KERN_CRIT "CPU%d: Temperature above threshold, "
-		       "cpu clock throttled (total events = %lu)\n", cpu,
-		       __get_cpu_var(thermal_throttle_count));
+		       "cpu clock throttled (total events = %lu)\n",
+		       cpu, __get_cpu_var(thermal_throttle_count));
 
 		add_taint(TAINT_MACHINE_CHECK);
-	} else {
-		printk(KERN_CRIT "CPU%d: Temperature/speed normal\n", cpu);
+		return 1;
+	}
+	if (was_throttled) {
+		printk(KERN_INFO "CPU%d: Temperature/speed normal\n", cpu);
+		return 1;
 	}
 
-	return 1;
+	return 0;
 }
 
 #ifdef CONFIG_SYSFS
@@ -55,6 +55,7 @@ struct x86_pmu {
 	int num_counters_fixed;
 	int counter_bits;
 	u64 counter_mask;
+	int apic;
 	u64 max_period;
 	u64 intel_ctrl;
 };
@@ -72,8 +73,8 @@ static const u64 p6_perfmon_event_map[] =
 {
 	[PERF_COUNT_HW_CPU_CYCLES] = 0x0079,
 	[PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
-	[PERF_COUNT_HW_CACHE_REFERENCES] = 0x0000,
-	[PERF_COUNT_HW_CACHE_MISSES] = 0x0000,
+	[PERF_COUNT_HW_CACHE_REFERENCES] = 0x0f2e,
+	[PERF_COUNT_HW_CACHE_MISSES] = 0x012e,
 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
 	[PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
 	[PERF_COUNT_HW_BUS_CYCLES] = 0x0062,
@@ -613,6 +614,7 @@ static DEFINE_MUTEX(pmc_reserve_mutex);
 
 static bool reserve_pmc_hardware(void)
 {
+#ifdef CONFIG_X86_LOCAL_APIC
 	int i;
 
 	if (nmi_watchdog == NMI_LOCAL_APIC)
@@ -627,9 +629,11 @@ static bool reserve_pmc_hardware(void)
 		if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
 			goto eventsel_fail;
 	}
+#endif
 
 	return true;
 
+#ifdef CONFIG_X86_LOCAL_APIC
 eventsel_fail:
 	for (i--; i >= 0; i--)
 		release_evntsel_nmi(x86_pmu.eventsel + i);
@@ -644,10 +648,12 @@ perfctr_fail:
 	enable_lapic_nmi_watchdog();
 
 	return false;
+#endif
 }
 
 static void release_pmc_hardware(void)
 {
+#ifdef CONFIG_X86_LOCAL_APIC
 	int i;
 
 	for (i = 0; i < x86_pmu.num_counters; i++) {
@@ -657,6 +663,7 @@ static void release_pmc_hardware(void)
 
 	if (nmi_watchdog == NMI_LOCAL_APIC)
 		enable_lapic_nmi_watchdog();
+#endif
 }
 
 static void hw_perf_counter_destroy(struct perf_counter *counter)
@@ -748,6 +755,15 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 		hwc->sample_period = x86_pmu.max_period;
 		hwc->last_period = hwc->sample_period;
 		atomic64_set(&hwc->period_left, hwc->sample_period);
+	} else {
+		/*
+		 * If we have a PMU initialized but no APIC
+		 * interrupts, we cannot sample hardware
+		 * counters (user-space has to fall back and
+		 * sample via a hrtimer based software counter):
+		 */
+		if (!x86_pmu.apic)
+			return -EOPNOTSUPP;
 	}
 
 	counter->destroy = hw_perf_counter_destroy;
@@ -1449,18 +1465,22 @@ void smp_perf_pending_interrupt(struct pt_regs *regs)
 
 void set_perf_counter_pending(void)
 {
+#ifdef CONFIG_X86_LOCAL_APIC
 	apic->send_IPI_self(LOCAL_PENDING_VECTOR);
+#endif
 }
 
 void perf_counters_lapic_init(void)
 {
-	if (!x86_pmu_initialized())
+#ifdef CONFIG_X86_LOCAL_APIC
+	if (!x86_pmu.apic || !x86_pmu_initialized())
 		return;
 
 	/*
 	 * Always use NMI for PMU
 	 */
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
+#endif
 }
 
 static int __kprobes
@@ -1484,7 +1504,9 @@ perf_counter_nmi_handler(struct notifier_block *self,
 
 	regs = args->regs;
 
+#ifdef CONFIG_X86_LOCAL_APIC
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
+#endif
 	/*
 	 * Can't rely on the handled return value to say it was our NMI, two
 	 * counters could trigger 'simultaneously' raising two back-to-back NMIs.
@@ -1515,6 +1537,7 @@ static struct x86_pmu p6_pmu = {
 	.event_map = p6_pmu_event_map,
 	.raw_event = p6_pmu_raw_event,
 	.max_events = ARRAY_SIZE(p6_perfmon_event_map),
+	.apic = 1,
 	.max_period = (1ULL << 31) - 1,
 	.version = 0,
 	.num_counters = 2,
@@ -1541,6 +1564,7 @@ static struct x86_pmu intel_pmu = {
 	.event_map = intel_pmu_event_map,
 	.raw_event = intel_pmu_raw_event,
 	.max_events = ARRAY_SIZE(intel_perfmon_event_map),
+	.apic = 1,
 	/*
 	 * Intel PMCs cannot be accessed sanely above 32 bit width,
 	 * so we install an artificial 1<<31 period regardless of
@@ -1564,6 +1588,7 @@ static struct x86_pmu amd_pmu = {
 	.num_counters = 4,
 	.counter_bits = 48,
 	.counter_mask = (1ULL << 48) - 1,
+	.apic = 1,
 	/* use highest bit to detect overflow */
 	.max_period = (1ULL << 47) - 1,
 };
@@ -1589,12 +1614,13 @@ static int p6_pmu_init(void)
 		return -ENODEV;
 	}
 
-	if (!cpu_has_apic) {
-		pr_info("no Local APIC, try rebooting with lapic");
-		return -ENODEV;
-	}
+	x86_pmu = p6_pmu;
 
-	x86_pmu = p6_pmu;
+	if (!cpu_has_apic) {
+		pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
+		pr_info("no hardware sampling interrupt available.\n");
+		x86_pmu.apic = 0;
+	}
 
 	return 0;
 }
@@ -261,9 +261,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
 * which will be freed later
 */
 
-#ifndef CONFIG_HOTPLUG_CPU
-.section .init.text,"ax",@progbits
-#endif
+__CPUINIT
 
 #ifdef CONFIG_SMP
 ENTRY(startup_32_smp)
@@ -602,11 +600,7 @@ ignore_int:
 #endif
 	iret
 
-#ifndef CONFIG_HOTPLUG_CPU
-	__CPUINITDATA
-#else
 	__REFDATA
-#endif
 .align 4
 ENTRY(initial_code)
 	.long i386_start_kernel
@@ -519,16 +519,12 @@ static void c1e_idle(void)
 		if (!cpumask_test_cpu(cpu, c1e_mask)) {
 			cpumask_set_cpu(cpu, c1e_mask);
 			/*
-			 * Force broadcast so ACPI can not interfere. Needs
-			 * to run with interrupts enabled as it uses
-			 * smp_function_call.
+			 * Force broadcast so ACPI can not interfere.
 			 */
-			local_irq_enable();
 			clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
 					   &cpu);
 			printk(KERN_INFO "Switch to broadcast mode on CPU%d\n",
 			       cpu);
-			local_irq_disable();
 		}
 		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
 
@@ -418,20 +418,20 @@ static int __init set_pci_reboot(const struct dmi_system_id *d)
 }
 
 static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
-	{ /* Handle problems with rebooting on Apple MacBook5,2 */
+	{ /* Handle problems with rebooting on Apple MacBook5 */
 		.callback = set_pci_reboot,
-		.ident = "Apple MacBook",
+		.ident = "Apple MacBook5",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5,2"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5"),
 		},
 	},
-	{ /* Handle problems with rebooting on Apple MacBookPro5,1 */
+	{ /* Handle problems with rebooting on Apple MacBookPro5 */
 		.callback = set_pci_reboot,
-		.ident = "Apple MacBookPro5,1",
+		.ident = "Apple MacBookPro5",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,1"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5"),
 		},
 	},
 	{ }
@@ -165,7 +165,7 @@ static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen)
 
 	if (!chosen) {
 		size_t vm_size = VMALLOC_END - VMALLOC_START;
-		size_t tot_size = num_possible_cpus() * PMD_SIZE;
+		size_t tot_size = nr_cpu_ids * PMD_SIZE;
 
 		/* on non-NUMA, embedding is better */
 		if (!pcpu_need_numa())
@@ -199,7 +199,7 @@ static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen)
 	dyn_size = pcpul_size - static_size - PERCPU_FIRST_CHUNK_RESERVE;
 
 	/* allocate pointer array and alloc large pages */
-	map_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpul_map[0]));
+	map_size = PFN_ALIGN(nr_cpu_ids * sizeof(pcpul_map[0]));
 	pcpul_map = alloc_bootmem(map_size);
 
 	for_each_possible_cpu(cpu) {
@@ -228,7 +228,7 @@ static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen)
 
 	/* allocate address and map */
 	pcpul_vm.flags = VM_ALLOC;
-	pcpul_vm.size = num_possible_cpus() * PMD_SIZE;
+	pcpul_vm.size = nr_cpu_ids * PMD_SIZE;
 	vm_area_register_early(&pcpul_vm, PMD_SIZE);
 
 	for_each_possible_cpu(cpu) {
@@ -250,8 +250,8 @@ static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen)
 			PMD_SIZE, pcpul_vm.addr, NULL);
 
 	/* sort pcpul_map array for pcpu_lpage_remapped() */
-	for (i = 0; i < num_possible_cpus() - 1; i++)
-		for (j = i + 1; j < num_possible_cpus(); j++)
+	for (i = 0; i < nr_cpu_ids - 1; i++)
+		for (j = i + 1; j < nr_cpu_ids; j++)
 			if (pcpul_map[i].ptr > pcpul_map[j].ptr) {
 				struct pcpul_ent tmp = pcpul_map[i];
 				pcpul_map[i] = pcpul_map[j];
@@ -288,7 +288,7 @@ void *pcpu_lpage_remapped(void *kaddr)
 {
 	void *pmd_addr = (void *)((unsigned long)kaddr & PMD_MASK);
 	unsigned long offset = (unsigned long)kaddr & ~PMD_MASK;
-	int left = 0, right = num_possible_cpus() - 1;
+	int left = 0, right = nr_cpu_ids - 1;
 	int pos;
 
 	/* pcpul in use at all? */
@@ -377,7 +377,7 @@ static ssize_t __init setup_pcpu_4k(size_t static_size)
 	pcpu4k_nr_static_pages = PFN_UP(static_size);
 
 	/* unaligned allocations can't be freed, round up to page size */
-	pages_size = PFN_ALIGN(pcpu4k_nr_static_pages * num_possible_cpus()
+	pages_size = PFN_ALIGN(pcpu4k_nr_static_pages * nr_cpu_ids
 			       * sizeof(pcpu4k_pages[0]));
 	pcpu4k_pages = alloc_bootmem(pages_size);
 
@@ -744,6 +744,7 @@ uv_activation_descriptor_init(int node, int pnode)
 	 * note that base_dest_nodeid is actually a nasid.
 	 */
 	ad2->header.base_dest_nodeid = uv_partition_base_pnode << 1;
+	ad2->header.dest_subnodeid = 0x10; /* the LB */
 	ad2->header.command = UV_NET_ENDPOINT_INTD;
 	ad2->header.int_both = 1;
 	/*
@@ -46,11 +46,10 @@ PHDRS {
 	data PT_LOAD FLAGS(7);        /* RWE */
 #ifdef CONFIG_X86_64
 	user PT_LOAD FLAGS(7);        /* RWE */
-	data.init PT_LOAD FLAGS(7);   /* RWE */
 #ifdef CONFIG_SMP
 	percpu PT_LOAD FLAGS(7);      /* RWE */
 #endif
-	data.init2 PT_LOAD FLAGS(7);  /* RWE */
+	init PT_LOAD FLAGS(7);        /* RWE */
 #endif
 	note PT_NOTE FLAGS(0);        /* ___ */
 }
@@ -103,65 +102,43 @@ SECTIONS
 		__stop___ex_table = .;
 	} :text = 0x9090
 
-	RODATA
+	RO_DATA(PAGE_SIZE)
 
 	/* Data */
-	. = ALIGN(PAGE_SIZE);
 	.data : AT(ADDR(.data) - LOAD_OFFSET) {
 		/* Start of data section */
 		_sdata = .;
+
+		/* init_task */
+		INIT_TASK_DATA(THREAD_SIZE)
+
+#ifdef CONFIG_X86_32
+		/* 32 bit has nosave before _edata */
+		NOSAVE_DATA
+#endif
+
+		PAGE_ALIGNED_DATA(PAGE_SIZE)
+		*(.data.idt)
+
+		CACHELINE_ALIGNED_DATA(CONFIG_X86_L1_CACHE_BYTES)
+
 		DATA_DATA
 		CONSTRUCTORS
-	} :data
-
-#ifdef CONFIG_X86_32
-	/* 32 bit has nosave before _edata */
-	. = ALIGN(PAGE_SIZE);
-	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
-		__nosave_begin = .;
-		*(.data.nosave)
-		. = ALIGN(PAGE_SIZE);
-		__nosave_end = .;
-	}
-#endif
-
-	. = ALIGN(PAGE_SIZE);
-	.data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
-		*(.data.page_aligned)
-		*(.data.idt)
-	}
-
-#ifdef CONFIG_X86_32
-	. = ALIGN(32);
-#else
-	. = ALIGN(PAGE_SIZE);
-	. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
-#endif
-	.data.cacheline_aligned :
-		AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
-		*(.data.cacheline_aligned)
-	}
-
-	/* rarely changed data like cpu maps */
-#ifdef CONFIG_X86_32
-	. = ALIGN(32);
-#else
-	. = ALIGN(CONFIG_X86_INTERNODE_CACHE_BYTES);
-#endif
-	.data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) {
-		*(.data.read_mostly)
+
+		/* rarely changed data like cpu maps */
+		READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)
 
 		/* End of data section */
 		_edata = .;
-	}
+	} :data
 
 #ifdef CONFIG_X86_64
 
 #define VSYSCALL_ADDR (-10*1024*1024)
-#define VSYSCALL_PHYS_ADDR ((LOADADDR(.data.read_mostly) + \
-			SIZEOF(.data.read_mostly) + 4095) & ~(4095))
-#define VSYSCALL_VIRT_ADDR ((ADDR(.data.read_mostly) + \
-			SIZEOF(.data.read_mostly) + 4095) & ~(4095))
+#define VSYSCALL_PHYS_ADDR ((LOADADDR(.data) + SIZEOF(.data) + \
+			PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
+#define VSYSCALL_VIRT_ADDR ((ADDR(.data) + SIZEOF(.data) + \
+			PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
 
 #define VLOAD_OFFSET (VSYSCALL_ADDR - VSYSCALL_PHYS_ADDR)
 #define VLOAD(x) (ADDR(x) - VLOAD_OFFSET)
@@ -227,35 +204,29 @@ SECTIONS
 
 #endif /* CONFIG_X86_64 */
 
-	/* init_task */
-	. = ALIGN(THREAD_SIZE);
-	.data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
-		*(.data.init_task)
-	}
-#ifdef CONFIG_X86_64
-	:data.init
-#endif
-
-	/*
-	 * smp_locks might be freed after init
-	 * start/end must be page aligned
-	 */
-	. = ALIGN(PAGE_SIZE);
-	.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
-		__smp_locks = .;
-		*(.smp_locks)
-		__smp_locks_end = .;
-		. = ALIGN(PAGE_SIZE);
-	}
-
 	/* Init code and data - will be freed after init */
 	. = ALIGN(PAGE_SIZE);
-	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
+	.init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
 		__init_begin = .; /* paired with __init_end */
+	}
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
+	/*
+	 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
+	 * output PHDR, so the next output section - .init.text - should
+	 * start another segment - init.
+	 */
+	PERCPU_VADDR(0, :percpu)
+#endif
+
+	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
 		_sinittext = .;
 		INIT_TEXT
 		_einittext = .;
 	}
+#ifdef CONFIG_X86_64
+	:init
+#endif
 
 	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
 		INIT_DATA
@@ -326,17 +297,7 @@ SECTIONS
 	}
 #endif
 
-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
-	/*
-	 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
-	 * output PHDR, so the next output section - __data_nosave - should
-	 * start another section data.init2. Also, pda should be at the head of
-	 * percpu area. Preallocate it and define the percpu offset symbol
-	 * so that it can be accessed as a percpu variable.
-	 */
-	. = ALIGN(PAGE_SIZE);
-	PERCPU_VADDR(0, :percpu)
-#else
+#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
 	PERCPU(PAGE_SIZE)
 #endif
 
@@ -347,15 +308,22 @@ SECTIONS
 		__init_end = .;
 	}
 
+	/*
+	 * smp_locks might be freed after init
+	 * start/end must be page aligned
+	 */
+	. = ALIGN(PAGE_SIZE);
+	.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
+		__smp_locks = .;
+		*(.smp_locks)
+		__smp_locks_end = .;
+		. = ALIGN(PAGE_SIZE);
+	}
+
 #ifdef CONFIG_X86_64
 	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
-		. = ALIGN(PAGE_SIZE);
-		__nosave_begin = .;
-		*(.data.nosave)
-		. = ALIGN(PAGE_SIZE);
-		__nosave_end = .;
-	} :data.init2
-	/* use another section data.init2, see PERCPU_VADDR() above */
+		/* use another section data.init2, see PERCPU_VADDR() above */
+		NOSAVE_DATA
+	}
 #endif
 
 	/* BSS */